@@ -1,705 +1,706 @@
 # dirstatemap.py
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.


 from .i18n import _

 from . import (
     error,
     pathutil,
     policy,
     txnutil,
     util,
 )

 from .dirstateutils import (
     docket as docketmod,
     v2,
 )

 parsers = policy.importmod('parsers')
 rustmod = policy.importrust('dirstate')

 propertycache = util.propertycache

 if rustmod is None:
     DirstateItem = parsers.DirstateItem
 else:
     DirstateItem = rustmod.DirstateItem

 rangemask = 0x7FFFFFFF


 class _dirstatemapcommon:
     """
     Methods that are identical for both implementations of the dirstatemap
     class, with and without Rust extensions enabled.
     """

     # please pytype

     _map = None
     copymap = None

     def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
         self._use_dirstate_v2 = use_dirstate_v2
         self._nodeconstants = nodeconstants
         self._ui = ui
         self._opener = opener
         self._root = root
         self._filename = b'dirstate'
         self._nodelen = 20  # Also update Rust code when changing this!
         self._parents = None
         self._dirtyparents = False
         self._docket = None

         # for consistent view between _pl() and _read() invocations
         self._pendingmode = None

+    def _set_identity(self):
+        self.identity = self._get_current_identity()
+
+    def _get_current_identity(self):
+        try:
+            return util.cachestat(self._opener.join(self._filename))
+        except FileNotFoundError:
+            return None
+
     def preload(self):
         """Loads the underlying data, if it's not already loaded"""
         self._map

     def get(self, key, default=None):
         return self._map.get(key, default)

     def __len__(self):
         return len(self._map)

     def __iter__(self):
         return iter(self._map)

     def __contains__(self, key):
         return key in self._map

     def __getitem__(self, item):
         return self._map[item]

     ### disk interaction

     def _opendirstatefile(self):
         fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
         if self._pendingmode is not None and self._pendingmode != mode:
             fp.close()
             raise error.Abort(
                 _(b'working directory state may be changed parallelly')
             )
         self._pendingmode = mode
         return fp

     def _readdirstatefile(self, size=-1):
         try:
             with self._opendirstatefile() as fp:
                 return fp.read(size)
         except FileNotFoundError:
             # File doesn't exist, so the current state is empty
             return b''

     @property
     def docket(self):
         if not self._docket:
             if not self._use_dirstate_v2:
                 raise error.ProgrammingError(
                     b'dirstate only has a docket in v2 format'
                 )
             self._docket = docketmod.DirstateDocket.parse(
                 self._readdirstatefile(), self._nodeconstants
             )
         return self._docket

     def write_v2_no_append(self, tr, st, meta, packed):
         old_docket = self.docket
         new_docket = docketmod.DirstateDocket.with_new_uuid(
             self.parents(), len(packed), meta
         )
         if old_docket.uuid == new_docket.uuid:
             raise error.ProgrammingError(b'dirstate docket name collision')
         data_filename = new_docket.data_filename()
         self._opener.write(data_filename, packed)
         # tell the transaction that we are adding a new file
         if tr is not None:
             tr.addbackup(data_filename, location=b'plain')
         # Write the new docket after the new data file has been
         # written. Because `st` was opened with `atomictemp=True`,
         # the actual `.hg/dirstate` file is only affected on close.
         st.write(new_docket.serialize())
         st.close()
         # Remove the old data file after the new docket pointing to
         # the new data file was written.
         if old_docket.uuid:
             data_filename = old_docket.data_filename()
             if tr is not None:
                 tr.addbackup(data_filename, location=b'plain')
             unlink = lambda _tr=None: self._opener.unlink(data_filename)
             if tr:
                 category = b"dirstate-v2-clean-" + old_docket.uuid
                 tr.addpostclose(category, unlink)
             else:
                 unlink()
         self._docket = new_docket

     ### reading/setting parents

     def parents(self):
         if not self._parents:
             if self._use_dirstate_v2:
                 self._parents = self.docket.parents
             else:
                 read_len = self._nodelen * 2
                 st = self._readdirstatefile(read_len)
                 l = len(st)
                 if l == read_len:
                     self._parents = (
                         st[: self._nodelen],
                         st[self._nodelen : 2 * self._nodelen],
                     )
                 elif l == 0:
                     self._parents = (
                         self._nodeconstants.nullid,
                         self._nodeconstants.nullid,
                     )
                 else:
                     raise error.Abort(
                         _(b'working directory state appears damaged!')
                     )

         return self._parents


 class dirstatemap(_dirstatemapcommon):
     """Map encapsulating the dirstate's contents.

     The dirstate contains the following state:

     - `identity` is the identity of the dirstate file, which can be used to
       detect when changes have occurred to the dirstate file.

     - `parents` is a pair containing the parents of the working copy. The
       parents are updated by calling `setparents`.

     - the state map maps filenames to tuples of (state, mode, size, mtime),
       where state is a single character representing 'normal', 'added',
       'removed', or 'merged'. It is read by treating the dirstate as a
       dict. File state is updated by calling various methods (see each
       documentation for details):

       - `reset_state`,
       - `set_tracked`
       - `set_untracked`
       - `set_clean`
       - `set_possibly_dirty`

     - `copymap` maps destination filenames to their source filename.

     The dirstate also provides the following views onto the state:

     - `filefoldmap` is a dict mapping normalized filenames to the denormalized
       form that they appear as in the dirstate.

     - `dirfoldmap` is a dict mapping normalized directory names to the
       denormalized form that they appear as in the dirstate.
     """

     ### Core data storage and access

     @propertycache
     def _map(self):
         self._map = {}
         self.read()
         return self._map

     @propertycache
     def copymap(self):
         self.copymap = {}
         self._map
         return self.copymap

     def clear(self):
         self._map.clear()
         self.copymap.clear()
         self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
         util.clearcachedproperty(self, b"_dirs")
         util.clearcachedproperty(self, b"_alldirs")
         util.clearcachedproperty(self, b"filefoldmap")
         util.clearcachedproperty(self, b"dirfoldmap")

     def items(self):
         return self._map.items()

     # forward for python2,3 compat
     iteritems = items

     def debug_iter(self, all):
         """
         Return an iterator of (filename, state, mode, size, mtime) tuples

         `all` is unused when Rust is not enabled
         """
         for (filename, item) in self.items():
             yield (filename, item.state, item.mode, item.size, item.mtime)

     def keys(self):
         return self._map.keys()

     ### reading/setting parents

     def setparents(self, p1, p2, fold_p2=False):
         self._parents = (p1, p2)
         self._dirtyparents = True
         copies = {}
         if fold_p2:
             for f, s in self._map.items():
                 # Discard "merged" markers when moving away from a merge state
                 if s.p2_info:
                     source = self.copymap.pop(f, None)
                     if source:
                         copies[f] = source
                     s.drop_merge_data()
         return copies

     ### disk interaction

     def read(self):
         # ignore HG_PENDING because identity is used only for writing
-        try:
-            self.identity = util.cachestat(self._opener.join(self._filename))
-        except FileNotFoundError:
-            self.identity = None
+        self._set_identity()

         if self._use_dirstate_v2:
             if not self.docket.uuid:
                 return
             st = self._opener.read(self.docket.data_filename())
         else:
             st = self._readdirstatefile()

         if not st:
             return

         # TODO: adjust this estimate for dirstate-v2
         if util.safehasattr(parsers, b'dict_new_presized'):
             # Make an estimate of the number of files in the dirstate based on
             # its size. This trades wasting some memory for avoiding costly
             # resizes. Each entry have a prefix of 17 bytes followed by one or
             # two path names. Studies on various large-scale real-world repositories
             # found 54 bytes a reasonable upper limit for the average path names.
             # Copy entries are ignored for the sake of this estimate.
             self._map = parsers.dict_new_presized(len(st) // 71)

         # Python's garbage collector triggers a GC each time a certain number
         # of container objects (the number being defined by
         # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
         # for each file in the dirstate. The C version then immediately marks
         # them as not to be tracked by the collector. However, this has no
         # effect on when GCs are triggered, only on what objects the GC looks
         # into. This means that O(number of files) GCs are unavoidable.
         # Depending on when in the process's lifetime the dirstate is parsed,
         # this can get very expensive. As a workaround, disable GC while
         # parsing the dirstate.
         #
         # (we cannot decorate the function directly since it is in a C module)
         if self._use_dirstate_v2:
             p = self.docket.parents
             meta = self.docket.tree_metadata
             parse_dirstate = util.nogc(v2.parse_dirstate)
             parse_dirstate(self._map, self.copymap, st, meta)
         else:
             parse_dirstate = util.nogc(parsers.parse_dirstate)
             p = parse_dirstate(self._map, self.copymap, st)
         if not self._dirtyparents:
             self.setparents(*p)

         # Avoid excess attribute lookups by fast pathing certain checks
         self.__contains__ = self._map.__contains__
         self.__getitem__ = self._map.__getitem__
         self.get = self._map.get

     def write(self, tr, st):
         if self._use_dirstate_v2:
             packed, meta = v2.pack_dirstate(self._map, self.copymap)
             self.write_v2_no_append(tr, st, meta, packed)
         else:
             packed = parsers.pack_dirstate(
                 self._map, self.copymap, self.parents()
             )
             st.write(packed)
             st.close()
         self._dirtyparents = False

     @propertycache
     def identity(self):
         self._map
         return self.identity

     ### code related to maintaining and accessing "extra" property
     # (e.g. "has_dir")

     def _dirs_incr(self, filename, old_entry=None):
         """increment the dirstate counter if applicable"""
         if (
             old_entry is None or old_entry.removed
         ) and "_dirs" in self.__dict__:
             self._dirs.addpath(filename)
         if old_entry is None and "_alldirs" in self.__dict__:
             self._alldirs.addpath(filename)

     def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
         """decrement the dirstate counter if applicable"""
         if old_entry is not None:
             if "_dirs" in self.__dict__ and not old_entry.removed:
                 self._dirs.delpath(filename)
             if "_alldirs" in self.__dict__ and not remove_variant:
                 self._alldirs.delpath(filename)
         elif remove_variant and "_alldirs" in self.__dict__:
             self._alldirs.addpath(filename)
         if "filefoldmap" in self.__dict__:
             normed = util.normcase(filename)
             self.filefoldmap.pop(normed, None)

     @propertycache
     def filefoldmap(self):
         """Returns a dictionary mapping normalized case paths to their
         non-normalized versions.
         """
         try:
             makefilefoldmap = parsers.make_file_foldmap
         except AttributeError:
             pass
         else:
             return makefilefoldmap(
                 self._map, util.normcasespec, util.normcasefallback
             )

         f = {}
         normcase = util.normcase
         for name, s in self._map.items():
             if not s.removed:
                 f[normcase(name)] = name
         f[b'.'] = b'.'  # prevents useless util.fspath() invocation
         return f

     @propertycache
     def dirfoldmap(self):
         f = {}
         normcase = util.normcase
         for name in self._dirs:
             f[normcase(name)] = name
         return f

     def hastrackeddir(self, d):
         """
         Returns True if the dirstate contains a tracked (not removed) file
         in this directory.
         """
         return d in self._dirs

     def hasdir(self, d):
         """
         Returns True if the dirstate contains a file (tracked or removed)
         in this directory.
         """
         return d in self._alldirs

     @propertycache
     def _dirs(self):
         return pathutil.dirs(self._map, only_tracked=True)

     @propertycache
     def _alldirs(self):
         return pathutil.dirs(self._map)

     ### code related to manipulation of entries and copy-sources

     def reset_state(
         self,
         filename,
         wc_tracked=False,
         p1_tracked=False,
         p2_info=False,
         has_meaningful_mtime=True,
         parentfiledata=None,
     ):
         """Set a entry to a given state, diregarding all previous state

         This is to be used by the part of the dirstate API dedicated to
         adjusting the dirstate after a update/merge.

         note: calling this might result to no entry existing at all if the
         dirstate map does not see any point at having one for this file
         anymore.
         """
         # copy information are now outdated
         # (maybe new information should be in directly passed to this function)
         self.copymap.pop(filename, None)

         if not (p1_tracked or p2_info or wc_tracked):
             old_entry = self._map.get(filename)
             self._drop_entry(filename)
             self._dirs_decr(filename, old_entry=old_entry)
             return

         old_entry = self._map.get(filename)
         self._dirs_incr(filename, old_entry)
         entry = DirstateItem(
             wc_tracked=wc_tracked,
             p1_tracked=p1_tracked,
             p2_info=p2_info,
             has_meaningful_mtime=has_meaningful_mtime,
             parentfiledata=parentfiledata,
         )
         self._map[filename] = entry

     def set_tracked(self, filename):
         new = False
         entry = self.get(filename)
         if entry is None:
             self._dirs_incr(filename)
             entry = DirstateItem(
                 wc_tracked=True,
             )

             self._map[filename] = entry
             new = True
         elif not entry.tracked:
             self._dirs_incr(filename, entry)
             entry.set_tracked()
             self._refresh_entry(filename, entry)
             new = True
         else:
             # XXX This is probably overkill for more case, but we need this to
             # fully replace the `normallookup` call with `set_tracked` one.
             # Consider smoothing this in the future.
             entry.set_possibly_dirty()
             self._refresh_entry(filename, entry)
         return new

     def set_untracked(self, f):
         """Mark a file as no longer tracked in the dirstate map"""
         entry = self.get(f)
         if entry is None:
             return False
         else:
             self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
             if not entry.p2_info:
                 self.copymap.pop(f, None)
             entry.set_untracked()
             self._refresh_entry(f, entry)
             return True

     def set_clean(self, filename, mode, size, mtime):
         """mark a file as back to a clean state"""
         entry = self[filename]
         size = size & rangemask
         entry.set_clean(mode, size, mtime)
         self._refresh_entry(filename, entry)
         self.copymap.pop(filename, None)

     def set_possibly_dirty(self, filename):
         """record that the current state of the file on disk is unknown"""
         entry = self[filename]
         entry.set_possibly_dirty()
         self._refresh_entry(filename, entry)

     def _refresh_entry(self, f, entry):
         """record updated state of an entry"""
         if not entry.any_tracked:
             self._map.pop(f, None)

     def _drop_entry(self, f):
         """remove any entry for file f

         This should also drop associated copy information

         The fact we actually need to drop it is the responsability of the caller"""
         self._map.pop(f, None)
         self.copymap.pop(f, None)


 if rustmod is not None:

     class dirstatemap(_dirstatemapcommon):

         ### Core data storage and access

         @propertycache
         def _map(self):
             """
             Fills the Dirstatemap when called.
             """
             # ignore HG_PENDING because identity is used only for writing
-            try:
-                self.identity = util.cachestat(
-                    self._opener.join(self._filename)
-                )
-            except FileNotFoundError:
-                self.identity = None
+            self._set_identity()

             if self._use_dirstate_v2:
                 if self.docket.uuid:
                     # TODO: use mmap when possible
                     data = self._opener.read(self.docket.data_filename())
                 else:
                     data = b''
                 self._map = rustmod.DirstateMap.new_v2(
                     data, self.docket.data_size, self.docket.tree_metadata
                 )
                 parents = self.docket.parents
             else:
                 self._map, parents = rustmod.DirstateMap.new_v1(
                     self._readdirstatefile()
                 )

             if parents and not self._dirtyparents:
                 self.setparents(*parents)

             self.__contains__ = self._map.__contains__
             self.__getitem__ = self._map.__getitem__
             self.get = self._map.get
             return self._map

         @property
         def copymap(self):
             return self._map.copymap()

         def debug_iter(self, all):
             """
             Return an iterator of (filename, state, mode, size, mtime) tuples

             `all`: also include with `state == b' '` dirstate tree nodes that
             don't have an associated `DirstateItem`.

             """
             return self._map.debug_iter(all)

         def clear(self):
             self._map.clear()
             self.setparents(
                 self._nodeconstants.nullid, self._nodeconstants.nullid
             )
             util.clearcachedproperty(self, b"_dirs")
             util.clearcachedproperty(self, b"_alldirs")
             util.clearcachedproperty(self, b"dirfoldmap")

         def items(self):
             return self._map.items()

         # forward for python2,3 compat
         iteritems = items

         def keys(self):
             return iter(self._map)

         ### reading/setting parents

         def setparents(self, p1, p2, fold_p2=False):
             self._parents = (p1, p2)
             self._dirtyparents = True
             copies = {}
             if fold_p2:
                 copies = self._map.setparents_fixup()
             return copies

         ### disk interaction

         @propertycache
         def identity(self):
             self._map
             return self.identity

         def write(self, tr, st):
             if not self._use_dirstate_v2:
                 p1, p2 = self.parents()
                 packed = self._map.write_v1(p1, p2)
                 st.write(packed)
                 st.close()
                 self._dirtyparents = False
                 return

             # We can only append to an existing data file if there is one
             can_append = self.docket.uuid is not None
             packed, meta, append = self._map.write_v2(can_append)
             if append:
                 docket = self.docket
                 data_filename = docket.data_filename()
                 # We mark it for backup to make sure a future `hg rollback` (or
                 # `hg recover`?) call find the data it needs to restore a
                 # working repository.
                 #
                 # The backup can use a hardlink because the format is resistant
                 # to trailing "dead" data.
                 if tr is not None:
                     tr.addbackup(data_filename, location=b'plain')
                 with self._opener(data_filename, b'r+b') as fp:
                     fp.seek(docket.data_size)
                     assert fp.tell() == docket.data_size
                     written = fp.write(packed)
                     if written is not None:  # py2 may return None
                         assert written == len(packed), (written, len(packed))
                 docket.data_size += len(packed)
                 docket.parents = self.parents()
                 docket.tree_metadata = meta
                 st.write(docket.serialize())
                 st.close()
             else:
                 self.write_v2_no_append(tr, st, meta, packed)
             # Reload from the newly-written file
             util.clearcachedproperty(self, b"_map")
             self._dirtyparents = False

         ### code related to maintaining and accessing "extra" property
         # (e.g. "has_dir")

         @propertycache
         def filefoldmap(self):
             """Returns a dictionary mapping normalized case paths to their
             non-normalized versions.
             """
             return self._map.filefoldmapasdict()

         def hastrackeddir(self, d):
             return self._map.hastrackeddir(d)

         def hasdir(self, d):
             return self._map.hasdir(d)

         @propertycache
         def dirfoldmap(self):
             f = {}
             normcase = util.normcase
             for name in self._map.tracked_dirs():
                 f[normcase(name)] = name
             return f

         ### code related to manipulation of entries and copy-sources

         def set_tracked(self, f):
             return self._map.set_tracked(f)

         def set_untracked(self, f):
             return self._map.set_untracked(f)

         def set_clean(self, filename, mode, size, mtime):
             self._map.set_clean(filename, mode, size, mtime)

         def set_possibly_dirty(self, f):
             self._map.set_possibly_dirty(f)

         def reset_state(
             self,
             filename,
             wc_tracked=False,
             p1_tracked=False,
             p2_info=False,
             has_meaningful_mtime=True,
             parentfiledata=None,
         ):
             return self._map.reset_state(
                 filename,
                 wc_tracked,
                 p1_tracked,
                 p2_info,
                 has_meaningful_mtime,
                 parentfiledata,
             )