dirstatemap: use common code for set_clean...
marmoute
r48943:f903a357 default
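This changeset consolidates `set_clean` into the `_dirstatemapcommon` base class: the pure-Python and Rust-backed dirstate maps previously each carried their own copy, and both now share one implementation that delegates the storage-specific part to `_refresh_entry`. A minimal sketch of the resulting shape, condensed from the diff below (the real class carries many more methods):

    rangemask = 0x7FFFFFFF

    class _dirstatemapcommon(object):
        """Methods shared by the Python and Rust dirstatemap implementations."""

        def set_clean(self, filename, mode, size, mtime):
            """mark a file as back to a clean state"""
            entry = self[filename]
            # clamp to 31 bits so the values fit the signed 32-bit on-disk fields
            mtime = mtime & rangemask
            size = size & rangemask
            entry.set_clean(mode, size, mtime)
            self._refresh_entry(filename, entry)  # storage-specific hook
            self.copymap.pop(filename, None)

Compared with the per-class versions removed further down, the shared code also routes the update through `_refresh_entry`, which the Rust-backed map implements on top of `self._map.addfile` rather than `set_dirstate_item`.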
@@ -1,933 +1,925 @@
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 from .dirstateutils import (
21 from .dirstateutils import (
22 docket as docketmod,
22 docket as docketmod,
23 )
23 )
24
24
25 parsers = policy.importmod('parsers')
25 parsers = policy.importmod('parsers')
26 rustmod = policy.importrust('dirstate')
26 rustmod = policy.importrust('dirstate')
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29
29
30 if rustmod is None:
30 if rustmod is None:
31 DirstateItem = parsers.DirstateItem
31 DirstateItem = parsers.DirstateItem
32 else:
32 else:
33 DirstateItem = rustmod.DirstateItem
33 DirstateItem = rustmod.DirstateItem
34
34
35 rangemask = 0x7FFFFFFF
35 rangemask = 0x7FFFFFFF
36
36
37
37
38 class _dirstatemapcommon(object):
38 class _dirstatemapcommon(object):
39 """
39 """
40 Methods that are identical for both implementations of the dirstatemap
40 Methods that are identical for both implementations of the dirstatemap
41 class, with and without Rust extensions enabled.
41 class, with and without Rust extensions enabled.
42 """
42 """
43
43
44 # please pytype
44 # please pytype
45
45
46 _map = None
46 _map = None
47 copymap = None
47 copymap = None
48
48
49 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
49 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
50 self._use_dirstate_v2 = use_dirstate_v2
50 self._use_dirstate_v2 = use_dirstate_v2
51 self._nodeconstants = nodeconstants
51 self._nodeconstants = nodeconstants
52 self._ui = ui
52 self._ui = ui
53 self._opener = opener
53 self._opener = opener
54 self._root = root
54 self._root = root
55 self._filename = b'dirstate'
55 self._filename = b'dirstate'
56 self._nodelen = 20 # Also update Rust code when changing this!
56 self._nodelen = 20 # Also update Rust code when changing this!
57 self._parents = None
57 self._parents = None
58 self._dirtyparents = False
58 self._dirtyparents = False
59
59
60 # for consistent view between _pl() and _read() invocations
60 # for consistent view between _pl() and _read() invocations
61 self._pendingmode = None
61 self._pendingmode = None
62
62
63 def preload(self):
63 def preload(self):
64 """Loads the underlying data, if it's not already loaded"""
64 """Loads the underlying data, if it's not already loaded"""
65 self._map
65 self._map
66
66
67 def get(self, key, default=None):
67 def get(self, key, default=None):
68 return self._map.get(key, default)
68 return self._map.get(key, default)
69
69
70 def __len__(self):
70 def __len__(self):
71 return len(self._map)
71 return len(self._map)
72
72
73 def __iter__(self):
73 def __iter__(self):
74 return iter(self._map)
74 return iter(self._map)
75
75
76 def __contains__(self, key):
76 def __contains__(self, key):
77 return key in self._map
77 return key in self._map
78
78
79 def __getitem__(self, item):
79 def __getitem__(self, item):
80 return self._map[item]
80 return self._map[item]
81
81
82 ### sub-class utility methods
82 ### sub-class utility methods
83 #
83 #
84 # Used to allow for a generic implementation of some methods while still
84 # Used to allow for a generic implementation of some methods while still
85 # coping with minor differences between implementations.
85 # coping with minor differences between implementations.
86
86
87 def _dirs_incr(self, filename, old_entry=None):
87 def _dirs_incr(self, filename, old_entry=None):
88 """incremente the dirstate counter if applicable
88 """incremente the dirstate counter if applicable
89
89
90 This might be a no-op for some subclass who deal with directory
90 This might be a no-op for some subclass who deal with directory
91 tracking in a different way.
91 tracking in a different way.
92 """
92 """
93
93
94 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
94 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
95 """decremente the dirstate counter if applicable
95 """decremente the dirstate counter if applicable
96
96
97 This might be a no-op for some subclass who deal with directory
97 This might be a no-op for some subclass who deal with directory
98 tracking in a different way.
98 tracking in a different way.
99 """
99 """
100
100
101 def _refresh_entry(self, f, entry):
101 def _refresh_entry(self, f, entry):
102 """record updated state of an entry"""
102 """record updated state of an entry"""
103
103
104 def _insert_entry(self, f, entry):
104 def _insert_entry(self, f, entry):
105 """add a new dirstate entry (or replace an unrelated one)
105 """add a new dirstate entry (or replace an unrelated one)
106
106
107 The fact that it is actually new is the responsibility of the caller
107 The fact that it is actually new is the responsibility of the caller
108 """
108 """
109
109
110 ### methods to manipulate the entries
110 ### methods to manipulate the entries
111
111
112 def set_possibly_dirty(self, filename):
112 def set_possibly_dirty(self, filename):
113 """record that the current state of the file on disk is unknown"""
113 """record that the current state of the file on disk is unknown"""
114 entry = self[filename]
114 entry = self[filename]
115 entry.set_possibly_dirty()
115 entry.set_possibly_dirty()
116 self._refresh_entry(filename, entry)
116 self._refresh_entry(filename, entry)
117
117
118 def set_clean(self, filename, mode, size, mtime):
119 """mark a file as back to a clean state"""
120 entry = self[filename]
121 mtime = mtime & rangemask
122 size = size & rangemask
123 entry.set_clean(mode, size, mtime)
124 self._refresh_entry(filename, entry)
125 self.copymap.pop(filename, None)
126
118 def set_tracked(self, filename):
127 def set_tracked(self, filename):
119 new = False
128 new = False
120 entry = self.get(filename)
129 entry = self.get(filename)
121 if entry is None:
130 if entry is None:
122 self._dirs_incr(filename)
131 self._dirs_incr(filename)
123 entry = DirstateItem(
132 entry = DirstateItem(
124 p1_tracked=False,
133 p1_tracked=False,
125 p2_tracked=False,
134 p2_tracked=False,
126 wc_tracked=True,
135 wc_tracked=True,
127 merged=False,
136 merged=False,
128 clean_p1=False,
137 clean_p1=False,
129 clean_p2=False,
138 clean_p2=False,
130 possibly_dirty=False,
139 possibly_dirty=False,
131 parentfiledata=None,
140 parentfiledata=None,
132 )
141 )
133
142
134 self._insert_entry(filename, entry)
143 self._insert_entry(filename, entry)
135 new = True
144 new = True
136 elif not entry.tracked:
145 elif not entry.tracked:
137 self._dirs_incr(filename, entry)
146 self._dirs_incr(filename, entry)
138 entry.set_tracked()
147 entry.set_tracked()
139 self._refresh_entry(filename, entry)
148 self._refresh_entry(filename, entry)
140 new = True
149 new = True
141 else:
150 else:
142 # XXX This is probably overkill for most cases, but we need this to
151 # XXX This is probably overkill for most cases, but we need this to
143 # fully replace the `normallookup` call with the `set_tracked` one.
152 # fully replace the `normallookup` call with the `set_tracked` one.
144 # Consider smoothing this in the future.
153 # Consider smoothing this in the future.
145 entry.set_possibly_dirty()
154 entry.set_possibly_dirty()
146 self._refresh_entry(filename, entry)
155 self._refresh_entry(filename, entry)
147 return new
156 return new
148
157
149 def set_untracked(self, f):
158 def set_untracked(self, f):
150 """Mark a file as no longer tracked in the dirstate map"""
159 """Mark a file as no longer tracked in the dirstate map"""
151 entry = self.get(f)
160 entry = self.get(f)
152 if entry is None:
161 if entry is None:
153 return False
162 return False
154 else:
163 else:
155 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
164 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
156 if not entry.merged:
165 if not entry.merged:
157 self.copymap.pop(f, None)
166 self.copymap.pop(f, None)
158 entry.set_untracked()
167 entry.set_untracked()
159 self._refresh_entry(f, entry)
168 self._refresh_entry(f, entry)
160 return True
169 return True
161
170
162
171
163 class dirstatemap(_dirstatemapcommon):
172 class dirstatemap(_dirstatemapcommon):
164 """Map encapsulating the dirstate's contents.
173 """Map encapsulating the dirstate's contents.
165
174
166 The dirstate contains the following state:
175 The dirstate contains the following state:
167
176
168 - `identity` is the identity of the dirstate file, which can be used to
177 - `identity` is the identity of the dirstate file, which can be used to
169 detect when changes have occurred to the dirstate file.
178 detect when changes have occurred to the dirstate file.
170
179
171 - `parents` is a pair containing the parents of the working copy. The
180 - `parents` is a pair containing the parents of the working copy. The
172 parents are updated by calling `setparents`.
181 parents are updated by calling `setparents`.
173
182
174 - the state map maps filenames to tuples of (state, mode, size, mtime),
183 - the state map maps filenames to tuples of (state, mode, size, mtime),
175 where state is a single character representing 'normal', 'added',
184 where state is a single character representing 'normal', 'added',
176 'removed', or 'merged'. It is read by treating the dirstate as a
185 'removed', or 'merged'. It is read by treating the dirstate as a
177 dict. File state is updated by calling various methods (see each
186 dict. File state is updated by calling various methods (see each
178 method's documentation for details):
187 method's documentation for details):
179
188
180 - `reset_state`,
189 - `reset_state`,
181 - `set_tracked`
190 - `set_tracked`
182 - `set_untracked`
191 - `set_untracked`
183 - `set_clean`
192 - `set_clean`
184 - `set_possibly_dirty`
193 - `set_possibly_dirty`
185
194
186 - `copymap` maps destination filenames to their source filename.
195 - `copymap` maps destination filenames to their source filename.
187
196
188 The dirstate also provides the following views onto the state:
197 The dirstate also provides the following views onto the state:
189
198
190 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
199 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
191 form that they appear as in the dirstate.
200 form that they appear as in the dirstate.
192
201
193 - `dirfoldmap` is a dict mapping normalized directory names to the
202 - `dirfoldmap` is a dict mapping normalized directory names to the
194 denormalized form that they appear as in the dirstate.
203 denormalized form that they appear as in the dirstate.
195 """
204 """
196
205
197 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
206 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
198 super(dirstatemap, self).__init__(
207 super(dirstatemap, self).__init__(
199 ui, opener, root, nodeconstants, use_dirstate_v2
208 ui, opener, root, nodeconstants, use_dirstate_v2
200 )
209 )
201 if self._use_dirstate_v2:
210 if self._use_dirstate_v2:
202 msg = "Dirstate V2 not supportedi"
211 msg = "Dirstate V2 not supportedi"
203 msg += "(should have detected unsupported requirement)"
212 msg += "(should have detected unsupported requirement)"
204 raise error.ProgrammingError(msg)
213 raise error.ProgrammingError(msg)
205
214
206 ### Core data storage and access
215 ### Core data storage and access
207
216
208 @propertycache
217 @propertycache
209 def _map(self):
218 def _map(self):
210 self._map = {}
219 self._map = {}
211 self.read()
220 self.read()
212 return self._map
221 return self._map
213
222
214 @propertycache
223 @propertycache
215 def copymap(self):
224 def copymap(self):
216 self.copymap = {}
225 self.copymap = {}
217 self._map
226 self._map
218 return self.copymap
227 return self.copymap
219
228
220 def clear(self):
229 def clear(self):
221 self._map.clear()
230 self._map.clear()
222 self.copymap.clear()
231 self.copymap.clear()
223 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
232 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
224 util.clearcachedproperty(self, b"_dirs")
233 util.clearcachedproperty(self, b"_dirs")
225 util.clearcachedproperty(self, b"_alldirs")
234 util.clearcachedproperty(self, b"_alldirs")
226 util.clearcachedproperty(self, b"filefoldmap")
235 util.clearcachedproperty(self, b"filefoldmap")
227 util.clearcachedproperty(self, b"dirfoldmap")
236 util.clearcachedproperty(self, b"dirfoldmap")
228
237
229 def items(self):
238 def items(self):
230 return pycompat.iteritems(self._map)
239 return pycompat.iteritems(self._map)
231
240
232 # forward for python2,3 compat
241 # forward for python2,3 compat
233 iteritems = items
242 iteritems = items
234
243
235 def debug_iter(self, all):
244 def debug_iter(self, all):
236 """
245 """
237 Return an iterator of (filename, state, mode, size, mtime) tuples
246 Return an iterator of (filename, state, mode, size, mtime) tuples
238
247
239 `all` is unused when Rust is not enabled
248 `all` is unused when Rust is not enabled
240 """
249 """
241 for (filename, item) in self.items():
250 for (filename, item) in self.items():
242 yield (filename, item.state, item.mode, item.size, item.mtime)
251 yield (filename, item.state, item.mode, item.size, item.mtime)
243
252
244 def keys(self):
253 def keys(self):
245 return self._map.keys()
254 return self._map.keys()
246
255
247 ### reading/setting parents
256 ### reading/setting parents
248
257
249 def parents(self):
258 def parents(self):
250 if not self._parents:
259 if not self._parents:
251 try:
260 try:
252 fp = self._opendirstatefile()
261 fp = self._opendirstatefile()
253 st = fp.read(2 * self._nodelen)
262 st = fp.read(2 * self._nodelen)
254 fp.close()
263 fp.close()
255 except IOError as err:
264 except IOError as err:
256 if err.errno != errno.ENOENT:
265 if err.errno != errno.ENOENT:
257 raise
266 raise
258 # File doesn't exist, so the current state is empty
267 # File doesn't exist, so the current state is empty
259 st = b''
268 st = b''
260
269
261 l = len(st)
270 l = len(st)
262 if l == self._nodelen * 2:
271 if l == self._nodelen * 2:
263 self._parents = (
272 self._parents = (
264 st[: self._nodelen],
273 st[: self._nodelen],
265 st[self._nodelen : 2 * self._nodelen],
274 st[self._nodelen : 2 * self._nodelen],
266 )
275 )
267 elif l == 0:
276 elif l == 0:
268 self._parents = (
277 self._parents = (
269 self._nodeconstants.nullid,
278 self._nodeconstants.nullid,
270 self._nodeconstants.nullid,
279 self._nodeconstants.nullid,
271 )
280 )
272 else:
281 else:
273 raise error.Abort(
282 raise error.Abort(
274 _(b'working directory state appears damaged!')
283 _(b'working directory state appears damaged!')
275 )
284 )
276
285
277 return self._parents
286 return self._parents
278
287
279 def setparents(self, p1, p2, fold_p2=False):
288 def setparents(self, p1, p2, fold_p2=False):
280 self._parents = (p1, p2)
289 self._parents = (p1, p2)
281 self._dirtyparents = True
290 self._dirtyparents = True
282 copies = {}
291 copies = {}
283 if fold_p2:
292 if fold_p2:
284 for f, s in pycompat.iteritems(self._map):
293 for f, s in pycompat.iteritems(self._map):
285 # Discard "merged" markers when moving away from a merge state
294 # Discard "merged" markers when moving away from a merge state
286 if s.merged or s.from_p2:
295 if s.merged or s.from_p2:
287 source = self.copymap.pop(f, None)
296 source = self.copymap.pop(f, None)
288 if source:
297 if source:
289 copies[f] = source
298 copies[f] = source
290 s.drop_merge_data()
299 s.drop_merge_data()
291 return copies
300 return copies
292
301
293 ### disk interaction
302 ### disk interaction
294
303
295 def read(self):
304 def read(self):
296 # ignore HG_PENDING because identity is used only for writing
305 # ignore HG_PENDING because identity is used only for writing
297 self.identity = util.filestat.frompath(
306 self.identity = util.filestat.frompath(
298 self._opener.join(self._filename)
307 self._opener.join(self._filename)
299 )
308 )
300
309
301 try:
310 try:
302 fp = self._opendirstatefile()
311 fp = self._opendirstatefile()
303 try:
312 try:
304 st = fp.read()
313 st = fp.read()
305 finally:
314 finally:
306 fp.close()
315 fp.close()
307 except IOError as err:
316 except IOError as err:
308 if err.errno != errno.ENOENT:
317 if err.errno != errno.ENOENT:
309 raise
318 raise
310 return
319 return
311 if not st:
320 if not st:
312 return
321 return
313
322
314 if util.safehasattr(parsers, b'dict_new_presized'):
323 if util.safehasattr(parsers, b'dict_new_presized'):
315 # Make an estimate of the number of files in the dirstate based on
324 # Make an estimate of the number of files in the dirstate based on
316 # its size. This trades wasting some memory for avoiding costly
325 # its size. This trades wasting some memory for avoiding costly
317 # resizes. Each entry has a prefix of 17 bytes followed by one or
326 # resizes. Each entry has a prefix of 17 bytes followed by one or
318 # two path names. Studies on various large-scale real-world repositories
327 # two path names. Studies on various large-scale real-world repositories
319 # found 54 bytes to be a reasonable upper limit for the average path name.
328 # found 54 bytes to be a reasonable upper limit for the average path name.
320 # Copy entries are ignored for the sake of this estimate.
329 # Copy entries are ignored for the sake of this estimate.
321 self._map = parsers.dict_new_presized(len(st) // 71)
330 self._map = parsers.dict_new_presized(len(st) // 71)
322
331
323 # Python's garbage collector triggers a GC each time a certain number
332 # Python's garbage collector triggers a GC each time a certain number
324 # of container objects (the number being defined by
333 # of container objects (the number being defined by
325 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
334 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
326 # for each file in the dirstate. The C version then immediately marks
335 # for each file in the dirstate. The C version then immediately marks
327 # them as not to be tracked by the collector. However, this has no
336 # them as not to be tracked by the collector. However, this has no
328 # effect on when GCs are triggered, only on what objects the GC looks
337 # effect on when GCs are triggered, only on what objects the GC looks
329 # into. This means that O(number of files) GCs are unavoidable.
338 # into. This means that O(number of files) GCs are unavoidable.
330 # Depending on when in the process's lifetime the dirstate is parsed,
339 # Depending on when in the process's lifetime the dirstate is parsed,
331 # this can get very expensive. As a workaround, disable GC while
340 # this can get very expensive. As a workaround, disable GC while
332 # parsing the dirstate.
341 # parsing the dirstate.
333 #
342 #
334 # (we cannot decorate the function directly since it is in a C module)
343 # (we cannot decorate the function directly since it is in a C module)
335 parse_dirstate = util.nogc(parsers.parse_dirstate)
344 parse_dirstate = util.nogc(parsers.parse_dirstate)
336 p = parse_dirstate(self._map, self.copymap, st)
345 p = parse_dirstate(self._map, self.copymap, st)
337 if not self._dirtyparents:
346 if not self._dirtyparents:
338 self.setparents(*p)
347 self.setparents(*p)
339
348
340 # Avoid excess attribute lookups by fast pathing certain checks
349 # Avoid excess attribute lookups by fast pathing certain checks
341 self.__contains__ = self._map.__contains__
350 self.__contains__ = self._map.__contains__
342 self.__getitem__ = self._map.__getitem__
351 self.__getitem__ = self._map.__getitem__
343 self.get = self._map.get
352 self.get = self._map.get
344
353
345 def write(self, _tr, st, now):
354 def write(self, _tr, st, now):
346 d = parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
355 d = parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
347 st.write(d)
356 st.write(d)
348 st.close()
357 st.close()
349 self._dirtyparents = False
358 self._dirtyparents = False
350
359
351 def _opendirstatefile(self):
360 def _opendirstatefile(self):
352 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
361 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
353 if self._pendingmode is not None and self._pendingmode != mode:
362 if self._pendingmode is not None and self._pendingmode != mode:
354 fp.close()
363 fp.close()
355 raise error.Abort(
364 raise error.Abort(
356 _(b'working directory state may be changed in parallel')
365 _(b'working directory state may be changed in parallel')
357 )
366 )
358 self._pendingmode = mode
367 self._pendingmode = mode
359 return fp
368 return fp
360
369
361 @propertycache
370 @propertycache
362 def identity(self):
371 def identity(self):
363 self._map
372 self._map
364 return self.identity
373 return self.identity
365
374
366 ### code related to maintaining and accessing "extra" property
375 ### code related to maintaining and accessing "extra" property
367 # (e.g. "has_dir")
376 # (e.g. "has_dir")
368
377
369 def _dirs_incr(self, filename, old_entry=None):
378 def _dirs_incr(self, filename, old_entry=None):
370 """incremente the dirstate counter if applicable"""
379 """incremente the dirstate counter if applicable"""
371 if (
380 if (
372 old_entry is None or old_entry.removed
381 old_entry is None or old_entry.removed
373 ) and "_dirs" in self.__dict__:
382 ) and "_dirs" in self.__dict__:
374 self._dirs.addpath(filename)
383 self._dirs.addpath(filename)
375 if old_entry is None and "_alldirs" in self.__dict__:
384 if old_entry is None and "_alldirs" in self.__dict__:
376 self._alldirs.addpath(filename)
385 self._alldirs.addpath(filename)
377
386
378 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
387 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
379 """decremente the dirstate counter if applicable"""
388 """decremente the dirstate counter if applicable"""
380 if old_entry is not None:
389 if old_entry is not None:
381 if "_dirs" in self.__dict__ and not old_entry.removed:
390 if "_dirs" in self.__dict__ and not old_entry.removed:
382 self._dirs.delpath(filename)
391 self._dirs.delpath(filename)
383 if "_alldirs" in self.__dict__ and not remove_variant:
392 if "_alldirs" in self.__dict__ and not remove_variant:
384 self._alldirs.delpath(filename)
393 self._alldirs.delpath(filename)
385 elif remove_variant and "_alldirs" in self.__dict__:
394 elif remove_variant and "_alldirs" in self.__dict__:
386 self._alldirs.addpath(filename)
395 self._alldirs.addpath(filename)
387 if "filefoldmap" in self.__dict__:
396 if "filefoldmap" in self.__dict__:
388 normed = util.normcase(filename)
397 normed = util.normcase(filename)
389 self.filefoldmap.pop(normed, None)
398 self.filefoldmap.pop(normed, None)
390
399
391 @propertycache
400 @propertycache
392 def filefoldmap(self):
401 def filefoldmap(self):
393 """Returns a dictionary mapping normalized case paths to their
402 """Returns a dictionary mapping normalized case paths to their
394 non-normalized versions.
403 non-normalized versions.
395 """
404 """
396 try:
405 try:
397 makefilefoldmap = parsers.make_file_foldmap
406 makefilefoldmap = parsers.make_file_foldmap
398 except AttributeError:
407 except AttributeError:
399 pass
408 pass
400 else:
409 else:
401 return makefilefoldmap(
410 return makefilefoldmap(
402 self._map, util.normcasespec, util.normcasefallback
411 self._map, util.normcasespec, util.normcasefallback
403 )
412 )
404
413
405 f = {}
414 f = {}
406 normcase = util.normcase
415 normcase = util.normcase
407 for name, s in pycompat.iteritems(self._map):
416 for name, s in pycompat.iteritems(self._map):
408 if not s.removed:
417 if not s.removed:
409 f[normcase(name)] = name
418 f[normcase(name)] = name
410 f[b'.'] = b'.' # prevents useless util.fspath() invocation
419 f[b'.'] = b'.' # prevents useless util.fspath() invocation
411 return f
420 return f
412
421
413 @propertycache
422 @propertycache
414 def dirfoldmap(self):
423 def dirfoldmap(self):
415 f = {}
424 f = {}
416 normcase = util.normcase
425 normcase = util.normcase
417 for name in self._dirs:
426 for name in self._dirs:
418 f[normcase(name)] = name
427 f[normcase(name)] = name
419 return f
428 return f
420
429
421 def hastrackeddir(self, d):
430 def hastrackeddir(self, d):
422 """
431 """
423 Returns True if the dirstate contains a tracked (not removed) file
432 Returns True if the dirstate contains a tracked (not removed) file
424 in this directory.
433 in this directory.
425 """
434 """
426 return d in self._dirs
435 return d in self._dirs
427
436
428 def hasdir(self, d):
437 def hasdir(self, d):
429 """
438 """
430 Returns True if the dirstate contains a file (tracked or removed)
439 Returns True if the dirstate contains a file (tracked or removed)
431 in this directory.
440 in this directory.
432 """
441 """
433 return d in self._alldirs
442 return d in self._alldirs
434
443
435 @propertycache
444 @propertycache
436 def _dirs(self):
445 def _dirs(self):
437 return pathutil.dirs(self._map, only_tracked=True)
446 return pathutil.dirs(self._map, only_tracked=True)
438
447
439 @propertycache
448 @propertycache
440 def _alldirs(self):
449 def _alldirs(self):
441 return pathutil.dirs(self._map)
450 return pathutil.dirs(self._map)
442
451
443 ### code related to manipulation of entries and copy-sources
452 ### code related to manipulation of entries and copy-sources
444
453
445 def _refresh_entry(self, f, entry):
454 def _refresh_entry(self, f, entry):
446 if not entry.any_tracked:
455 if not entry.any_tracked:
447 self._map.pop(f, None)
456 self._map.pop(f, None)
448
457
449 def _insert_entry(self, f, entry):
458 def _insert_entry(self, f, entry):
450 self._map[f] = entry
459 self._map[f] = entry
451
460
452 def set_clean(self, filename, mode, size, mtime):
453 """mark a file as back to a clean state"""
454 entry = self[filename]
455 mtime = mtime & rangemask
456 size = size & rangemask
457 entry.set_clean(mode, size, mtime)
458 self.copymap.pop(filename, None)
459
460 def reset_state(
461 def reset_state(
461 self,
462 self,
462 filename,
463 filename,
463 wc_tracked=False,
464 wc_tracked=False,
464 p1_tracked=False,
465 p1_tracked=False,
465 p2_tracked=False,
466 p2_tracked=False,
466 merged=False,
467 merged=False,
467 clean_p1=False,
468 clean_p1=False,
468 clean_p2=False,
469 clean_p2=False,
469 possibly_dirty=False,
470 possibly_dirty=False,
470 parentfiledata=None,
471 parentfiledata=None,
471 ):
472 ):
472 """Set a entry to a given state, diregarding all previous state
473 """Set a entry to a given state, diregarding all previous state
473
474
474 This is to be used by the part of the dirstate API dedicated to
475 This is to be used by the part of the dirstate API dedicated to
475 adjusting the dirstate after a update/merge.
476 adjusting the dirstate after a update/merge.
476
477
477 note: calling this might result to no entry existing at all if the
478 note: calling this might result to no entry existing at all if the
478 dirstate map does not see any point at having one for this file
479 dirstate map does not see any point at having one for this file
479 anymore.
480 anymore.
480 """
481 """
481 if merged and (clean_p1 or clean_p2):
482 if merged and (clean_p1 or clean_p2):
482 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
483 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
483 raise error.ProgrammingError(msg)
484 raise error.ProgrammingError(msg)
484 # copy information is now outdated
485 # copy information is now outdated
485 # (maybe new information should be passed directly to this function)
486 # (maybe new information should be passed directly to this function)
486 self.copymap.pop(filename, None)
487 self.copymap.pop(filename, None)
487
488
488 if not (p1_tracked or p2_tracked or wc_tracked):
489 if not (p1_tracked or p2_tracked or wc_tracked):
489 old_entry = self._map.pop(filename, None)
490 old_entry = self._map.pop(filename, None)
490 self._dirs_decr(filename, old_entry=old_entry)
491 self._dirs_decr(filename, old_entry=old_entry)
491 self.copymap.pop(filename, None)
492 self.copymap.pop(filename, None)
492 return
493 return
493 elif merged:
494 elif merged:
494 pass
495 pass
495 elif not (p1_tracked or p2_tracked) and wc_tracked:
496 elif not (p1_tracked or p2_tracked) and wc_tracked:
496 pass # file is added, nothing special to adjust
497 pass # file is added, nothing special to adjust
497 elif (p1_tracked or p2_tracked) and not wc_tracked:
498 elif (p1_tracked or p2_tracked) and not wc_tracked:
498 pass
499 pass
499 elif clean_p2 and wc_tracked:
500 elif clean_p2 and wc_tracked:
500 pass
501 pass
501 elif not p1_tracked and p2_tracked and wc_tracked:
502 elif not p1_tracked and p2_tracked and wc_tracked:
502 clean_p2 = True
503 clean_p2 = True
503 elif possibly_dirty:
504 elif possibly_dirty:
504 pass
505 pass
505 elif wc_tracked:
506 elif wc_tracked:
506 # this is a "normal" file
507 # this is a "normal" file
507 if parentfiledata is None:
508 if parentfiledata is None:
508 msg = b'failed to pass parentfiledata for a normal file: %s'
509 msg = b'failed to pass parentfiledata for a normal file: %s'
509 msg %= filename
510 msg %= filename
510 raise error.ProgrammingError(msg)
511 raise error.ProgrammingError(msg)
511 else:
512 else:
512 assert False, 'unreachable'
513 assert False, 'unreachable'
513
514
514 old_entry = self._map.get(filename)
515 old_entry = self._map.get(filename)
515 self._dirs_incr(filename, old_entry)
516 self._dirs_incr(filename, old_entry)
516 entry = DirstateItem(
517 entry = DirstateItem(
517 wc_tracked=wc_tracked,
518 wc_tracked=wc_tracked,
518 p1_tracked=p1_tracked,
519 p1_tracked=p1_tracked,
519 p2_tracked=p2_tracked,
520 p2_tracked=p2_tracked,
520 merged=merged,
521 merged=merged,
521 clean_p1=clean_p1,
522 clean_p1=clean_p1,
522 clean_p2=clean_p2,
523 clean_p2=clean_p2,
523 possibly_dirty=possibly_dirty,
524 possibly_dirty=possibly_dirty,
524 parentfiledata=parentfiledata,
525 parentfiledata=parentfiledata,
525 )
526 )
526 self._map[filename] = entry
527 self._map[filename] = entry
527
528
528
529
529 if rustmod is not None:
530 if rustmod is not None:
530
531
531 class dirstatemap(_dirstatemapcommon):
532 class dirstatemap(_dirstatemapcommon):
532 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
533 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
533 super(dirstatemap, self).__init__(
534 super(dirstatemap, self).__init__(
534 ui, opener, root, nodeconstants, use_dirstate_v2
535 ui, opener, root, nodeconstants, use_dirstate_v2
535 )
536 )
536 self._docket = None
537 self._docket = None
537
538
538 ### Core data storage and access
539 ### Core data storage and access
539
540
540 @property
541 @property
541 def docket(self):
542 def docket(self):
542 if not self._docket:
543 if not self._docket:
543 if not self._use_dirstate_v2:
544 if not self._use_dirstate_v2:
544 raise error.ProgrammingError(
545 raise error.ProgrammingError(
545 b'dirstate only has a docket in v2 format'
546 b'dirstate only has a docket in v2 format'
546 )
547 )
547 self._docket = docketmod.DirstateDocket.parse(
548 self._docket = docketmod.DirstateDocket.parse(
548 self._readdirstatefile(), self._nodeconstants
549 self._readdirstatefile(), self._nodeconstants
549 )
550 )
550 return self._docket
551 return self._docket
551
552
552 @propertycache
553 @propertycache
553 def _map(self):
554 def _map(self):
554 """
555 """
555 Fills the Dirstatemap when called.
556 Fills the Dirstatemap when called.
556 """
557 """
557 # ignore HG_PENDING because identity is used only for writing
558 # ignore HG_PENDING because identity is used only for writing
558 self.identity = util.filestat.frompath(
559 self.identity = util.filestat.frompath(
559 self._opener.join(self._filename)
560 self._opener.join(self._filename)
560 )
561 )
561
562
562 if self._use_dirstate_v2:
563 if self._use_dirstate_v2:
563 if self.docket.uuid:
564 if self.docket.uuid:
564 # TODO: use mmap when possible
565 # TODO: use mmap when possible
565 data = self._opener.read(self.docket.data_filename())
566 data = self._opener.read(self.docket.data_filename())
566 else:
567 else:
567 data = b''
568 data = b''
568 self._map = rustmod.DirstateMap.new_v2(
569 self._map = rustmod.DirstateMap.new_v2(
569 data, self.docket.data_size, self.docket.tree_metadata
570 data, self.docket.data_size, self.docket.tree_metadata
570 )
571 )
571 parents = self.docket.parents
572 parents = self.docket.parents
572 else:
573 else:
573 self._map, parents = rustmod.DirstateMap.new_v1(
574 self._map, parents = rustmod.DirstateMap.new_v1(
574 self._readdirstatefile()
575 self._readdirstatefile()
575 )
576 )
576
577
577 if parents and not self._dirtyparents:
578 if parents and not self._dirtyparents:
578 self.setparents(*parents)
579 self.setparents(*parents)
579
580
580 self.__contains__ = self._map.__contains__
581 self.__contains__ = self._map.__contains__
581 self.__getitem__ = self._map.__getitem__
582 self.__getitem__ = self._map.__getitem__
582 self.get = self._map.get
583 self.get = self._map.get
583 return self._map
584 return self._map
584
585
585 @property
586 @property
586 def copymap(self):
587 def copymap(self):
587 return self._map.copymap()
588 return self._map.copymap()
588
589
589 def debug_iter(self, all):
590 def debug_iter(self, all):
590 """
591 """
591 Return an iterator of (filename, state, mode, size, mtime) tuples
592 Return an iterator of (filename, state, mode, size, mtime) tuples
592
593
593 `all`: also include, with `state == b' '`, dirstate tree nodes that
594 `all`: also include, with `state == b' '`, dirstate tree nodes that
594 don't have an associated `DirstateItem`.
595 don't have an associated `DirstateItem`.
595
596
596 """
597 """
597 return self._map.debug_iter(all)
598 return self._map.debug_iter(all)
598
599
599 def clear(self):
600 def clear(self):
600 self._map.clear()
601 self._map.clear()
601 self.setparents(
602 self.setparents(
602 self._nodeconstants.nullid, self._nodeconstants.nullid
603 self._nodeconstants.nullid, self._nodeconstants.nullid
603 )
604 )
604 util.clearcachedproperty(self, b"_dirs")
605 util.clearcachedproperty(self, b"_dirs")
605 util.clearcachedproperty(self, b"_alldirs")
606 util.clearcachedproperty(self, b"_alldirs")
606 util.clearcachedproperty(self, b"dirfoldmap")
607 util.clearcachedproperty(self, b"dirfoldmap")
607
608
608 def items(self):
609 def items(self):
609 return self._map.items()
610 return self._map.items()
610
611
611 # forward for python2,3 compat
612 # forward for python2,3 compat
612 iteritems = items
613 iteritems = items
613
614
614 def keys(self):
615 def keys(self):
615 return iter(self._map)
616 return iter(self._map)
616
617
617 ### reading/setting parents
618 ### reading/setting parents
618
619
619 def setparents(self, p1, p2, fold_p2=False):
620 def setparents(self, p1, p2, fold_p2=False):
620 self._parents = (p1, p2)
621 self._parents = (p1, p2)
621 self._dirtyparents = True
622 self._dirtyparents = True
622 copies = {}
623 copies = {}
623 if fold_p2:
624 if fold_p2:
624 # Collect into an intermediate list to avoid a `RuntimeError`
625 # Collect into an intermediate list to avoid a `RuntimeError`
625 # exception due to mutation during iteration.
626 # exception due to mutation during iteration.
626 # TODO: move the whole loop to Rust where `iter_mut`
627 # TODO: move the whole loop to Rust where `iter_mut`
627 # enables in-place mutation of elements of a collection while
628 # enables in-place mutation of elements of a collection while
628 # iterating it, without mutating the collection itself.
629 # iterating it, without mutating the collection itself.
629 candidatefiles = [
630 candidatefiles = [
630 (f, s)
631 (f, s)
631 for f, s in self._map.items()
632 for f, s in self._map.items()
632 if s.merged or s.from_p2
633 if s.merged or s.from_p2
633 ]
634 ]
634 for f, s in candidatefiles:
635 for f, s in candidatefiles:
635 # Discard "merged" markers when moving away from a merge state
636 # Discard "merged" markers when moving away from a merge state
636 if s.merged:
637 if s.merged:
637 source = self.copymap.get(f)
638 source = self.copymap.get(f)
638 if source:
639 if source:
639 copies[f] = source
640 copies[f] = source
640 self.reset_state(
641 self.reset_state(
641 f,
642 f,
642 wc_tracked=True,
643 wc_tracked=True,
643 p1_tracked=True,
644 p1_tracked=True,
644 possibly_dirty=True,
645 possibly_dirty=True,
645 )
646 )
646 # Also fix up otherparent markers
647 # Also fix up otherparent markers
647 elif s.from_p2:
648 elif s.from_p2:
648 source = self.copymap.get(f)
649 source = self.copymap.get(f)
649 if source:
650 if source:
650 copies[f] = source
651 copies[f] = source
651 self.reset_state(
652 self.reset_state(
652 f,
653 f,
653 p1_tracked=False,
654 p1_tracked=False,
654 wc_tracked=True,
655 wc_tracked=True,
655 )
656 )
656 return copies
657 return copies
657
658
658 def parents(self):
659 def parents(self):
659 if not self._parents:
660 if not self._parents:
660 if self._use_dirstate_v2:
661 if self._use_dirstate_v2:
661 self._parents = self.docket.parents
662 self._parents = self.docket.parents
662 else:
663 else:
663 read_len = self._nodelen * 2
664 read_len = self._nodelen * 2
664 st = self._readdirstatefile(read_len)
665 st = self._readdirstatefile(read_len)
665 l = len(st)
666 l = len(st)
666 if l == read_len:
667 if l == read_len:
667 self._parents = (
668 self._parents = (
668 st[: self._nodelen],
669 st[: self._nodelen],
669 st[self._nodelen : 2 * self._nodelen],
670 st[self._nodelen : 2 * self._nodelen],
670 )
671 )
671 elif l == 0:
672 elif l == 0:
672 self._parents = (
673 self._parents = (
673 self._nodeconstants.nullid,
674 self._nodeconstants.nullid,
674 self._nodeconstants.nullid,
675 self._nodeconstants.nullid,
675 )
676 )
676 else:
677 else:
677 raise error.Abort(
678 raise error.Abort(
678 _(b'working directory state appears damaged!')
679 _(b'working directory state appears damaged!')
679 )
680 )
680
681
681 return self._parents
682 return self._parents
682
683
683 ### disk interaction
684 ### disk interaction
684
685
685 @propertycache
686 @propertycache
686 def identity(self):
687 def identity(self):
687 self._map
688 self._map
688 return self.identity
689 return self.identity
689
690
690 def write(self, tr, st, now):
691 def write(self, tr, st, now):
691 if not self._use_dirstate_v2:
692 if not self._use_dirstate_v2:
692 p1, p2 = self.parents()
693 p1, p2 = self.parents()
693 packed = self._map.write_v1(p1, p2, now)
694 packed = self._map.write_v1(p1, p2, now)
694 st.write(packed)
695 st.write(packed)
695 st.close()
696 st.close()
696 self._dirtyparents = False
697 self._dirtyparents = False
697 return
698 return
698
699
699 # We can only append to an existing data file if there is one
700 # We can only append to an existing data file if there is one
700 can_append = self.docket.uuid is not None
701 can_append = self.docket.uuid is not None
701 packed, meta, append = self._map.write_v2(now, can_append)
702 packed, meta, append = self._map.write_v2(now, can_append)
702 if append:
703 if append:
703 docket = self.docket
704 docket = self.docket
704 data_filename = docket.data_filename()
705 data_filename = docket.data_filename()
705 if tr:
706 if tr:
706 tr.add(data_filename, docket.data_size)
707 tr.add(data_filename, docket.data_size)
707 with self._opener(data_filename, b'r+b') as fp:
708 with self._opener(data_filename, b'r+b') as fp:
708 fp.seek(docket.data_size)
709 fp.seek(docket.data_size)
709 assert fp.tell() == docket.data_size
710 assert fp.tell() == docket.data_size
710 written = fp.write(packed)
711 written = fp.write(packed)
711 if written is not None: # py2 may return None
712 if written is not None: # py2 may return None
712 assert written == len(packed), (written, len(packed))
713 assert written == len(packed), (written, len(packed))
713 docket.data_size += len(packed)
714 docket.data_size += len(packed)
714 docket.parents = self.parents()
715 docket.parents = self.parents()
715 docket.tree_metadata = meta
716 docket.tree_metadata = meta
716 st.write(docket.serialize())
717 st.write(docket.serialize())
717 st.close()
718 st.close()
718 else:
719 else:
719 old_docket = self.docket
720 old_docket = self.docket
720 new_docket = docketmod.DirstateDocket.with_new_uuid(
721 new_docket = docketmod.DirstateDocket.with_new_uuid(
721 self.parents(), len(packed), meta
722 self.parents(), len(packed), meta
722 )
723 )
723 data_filename = new_docket.data_filename()
724 data_filename = new_docket.data_filename()
724 if tr:
725 if tr:
725 tr.add(data_filename, 0)
726 tr.add(data_filename, 0)
726 self._opener.write(data_filename, packed)
727 self._opener.write(data_filename, packed)
727 # Write the new docket after the new data file has been
728 # Write the new docket after the new data file has been
728 # written. Because `st` was opened with `atomictemp=True`,
729 # written. Because `st` was opened with `atomictemp=True`,
729 # the actual `.hg/dirstate` file is only affected on close.
730 # the actual `.hg/dirstate` file is only affected on close.
730 st.write(new_docket.serialize())
731 st.write(new_docket.serialize())
731 st.close()
732 st.close()
732 # Remove the old data file after the new docket pointing to
733 # Remove the old data file after the new docket pointing to
733 # the new data file was written.
734 # the new data file was written.
734 if old_docket.uuid:
735 if old_docket.uuid:
735 data_filename = old_docket.data_filename()
736 data_filename = old_docket.data_filename()
736 unlink = lambda _tr=None: self._opener.unlink(data_filename)
737 unlink = lambda _tr=None: self._opener.unlink(data_filename)
737 if tr:
738 if tr:
738 category = b"dirstate-v2-clean-" + old_docket.uuid
739 category = b"dirstate-v2-clean-" + old_docket.uuid
739 tr.addpostclose(category, unlink)
740 tr.addpostclose(category, unlink)
740 else:
741 else:
741 unlink()
742 unlink()
742 self._docket = new_docket
743 self._docket = new_docket
743 # Reload from the newly-written file
744 # Reload from the newly-written file
744 util.clearcachedproperty(self, b"_map")
745 util.clearcachedproperty(self, b"_map")
745 self._dirtyparents = False
746 self._dirtyparents = False
746
747
747 def _opendirstatefile(self):
748 def _opendirstatefile(self):
748 fp, mode = txnutil.trypending(
749 fp, mode = txnutil.trypending(
749 self._root, self._opener, self._filename
750 self._root, self._opener, self._filename
750 )
751 )
751 if self._pendingmode is not None and self._pendingmode != mode:
752 if self._pendingmode is not None and self._pendingmode != mode:
752 fp.close()
753 fp.close()
753 raise error.Abort(
754 raise error.Abort(
754 _(b'working directory state may be changed in parallel')
755 _(b'working directory state may be changed in parallel')
755 )
756 )
756 self._pendingmode = mode
757 self._pendingmode = mode
757 return fp
758 return fp
758
759
759 def _readdirstatefile(self, size=-1):
760 def _readdirstatefile(self, size=-1):
760 try:
761 try:
761 with self._opendirstatefile() as fp:
762 with self._opendirstatefile() as fp:
762 return fp.read(size)
763 return fp.read(size)
763 except IOError as err:
764 except IOError as err:
764 if err.errno != errno.ENOENT:
765 if err.errno != errno.ENOENT:
765 raise
766 raise
766 # File doesn't exist, so the current state is empty
767 # File doesn't exist, so the current state is empty
767 return b''
768 return b''
768
769
769 ### code related to maintaining and accessing "extra" property
770 ### code related to maintaining and accessing "extra" property
770 # (e.g. "has_dir")
771 # (e.g. "has_dir")
771
772
772 @propertycache
773 @propertycache
773 def filefoldmap(self):
774 def filefoldmap(self):
774 """Returns a dictionary mapping normalized case paths to their
775 """Returns a dictionary mapping normalized case paths to their
775 non-normalized versions.
776 non-normalized versions.
776 """
777 """
777 return self._map.filefoldmapasdict()
778 return self._map.filefoldmapasdict()
778
779
779 def hastrackeddir(self, d):
780 def hastrackeddir(self, d):
780 return self._map.hastrackeddir(d)
781 return self._map.hastrackeddir(d)
781
782
782 def hasdir(self, d):
783 def hasdir(self, d):
783 return self._map.hasdir(d)
784 return self._map.hasdir(d)
784
785
785 @propertycache
786 @propertycache
786 def dirfoldmap(self):
787 def dirfoldmap(self):
787 f = {}
788 f = {}
788 normcase = util.normcase
789 normcase = util.normcase
789 for name in self._map.tracked_dirs():
790 for name in self._map.tracked_dirs():
790 f[normcase(name)] = name
791 f[normcase(name)] = name
791 return f
792 return f
792
793
793 ### code related to manipulation of entries and copy-sources
794 ### code related to manipulation of entries and copy-sources
794
795
795 def _refresh_entry(self, f, entry):
796 def _refresh_entry(self, f, entry):
796 if not entry.any_tracked:
797 if not entry.any_tracked:
797 self._map.drop_item_and_copy_source(f)
798 self._map.drop_item_and_copy_source(f)
798 else:
799 else:
799 self._map.addfile(f, entry)
800 self._map.addfile(f, entry)
800
801
801 def _insert_entry(self, f, entry):
802 def _insert_entry(self, f, entry):
802 self._map.addfile(f, entry)
803 self._map.addfile(f, entry)
803
804
804 def set_clean(self, filename, mode, size, mtime):
805 """mark a file as back to a clean state"""
806 entry = self[filename]
807 mtime = mtime & rangemask
808 size = size & rangemask
809 entry.set_clean(mode, size, mtime)
810 self._map.set_dirstate_item(filename, entry)
811 self._map.copymap().pop(filename, None)
812
813 def __setitem__(self, key, value):
805 def __setitem__(self, key, value):
814 assert isinstance(value, DirstateItem)
806 assert isinstance(value, DirstateItem)
815 self._map.set_dirstate_item(key, value)
807 self._map.set_dirstate_item(key, value)
816
808
817 def reset_state(
809 def reset_state(
818 self,
810 self,
819 filename,
811 filename,
820 wc_tracked=False,
812 wc_tracked=False,
821 p1_tracked=False,
813 p1_tracked=False,
822 p2_tracked=False,
814 p2_tracked=False,
823 merged=False,
815 merged=False,
824 clean_p1=False,
816 clean_p1=False,
825 clean_p2=False,
817 clean_p2=False,
826 possibly_dirty=False,
818 possibly_dirty=False,
827 parentfiledata=None,
819 parentfiledata=None,
828 ):
820 ):
829 """Set a entry to a given state, disregarding all previous state
821 """Set a entry to a given state, disregarding all previous state
830
822
831 This is to be used by the part of the dirstate API dedicated to
823 This is to be used by the part of the dirstate API dedicated to
832 adjusting the dirstate after a update/merge.
824 adjusting the dirstate after a update/merge.
833
825
834 note: calling this might result to no entry existing at all if the
826 note: calling this might result to no entry existing at all if the
835 dirstate map does not see any point at having one for this file
827 dirstate map does not see any point at having one for this file
836 anymore.
828 anymore.
837 """
829 """
838 if merged and (clean_p1 or clean_p2):
830 if merged and (clean_p1 or clean_p2):
839 msg = (
831 msg = (
840 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
832 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
841 )
833 )
842 raise error.ProgrammingError(msg)
834 raise error.ProgrammingError(msg)
843 # copy information is now outdated
835 # copy information is now outdated
844 # (maybe new information should be passed directly to this function)
836 # (maybe new information should be passed directly to this function)
845 self.copymap.pop(filename, None)
837 self.copymap.pop(filename, None)
846
838
847 if not (p1_tracked or p2_tracked or wc_tracked):
839 if not (p1_tracked or p2_tracked or wc_tracked):
848 self._map.drop_item_and_copy_source(filename)
840 self._map.drop_item_and_copy_source(filename)
849 elif merged:
841 elif merged:
850 # XXX might be merged and removed ?
842 # XXX might be merged and removed ?
851 entry = self.get(filename)
843 entry = self.get(filename)
852 if entry is not None and entry.tracked:
844 if entry is not None and entry.tracked:
853 # XXX mostly replicates dirstate.otherparent. We should get
845 # XXX mostly replicates dirstate.otherparent. We should get
854 # the higher layer to pass us more reliable data where `merged`
846 # the higher layer to pass us more reliable data where `merged`
855 # actually means merged. Dropping the else clause will show
847 # actually means merged. Dropping the else clause will show
856 # failure in `test-graft.t`
848 # failure in `test-graft.t`
857 self.addfile(filename, merged=True)
849 self.addfile(filename, merged=True)
858 else:
850 else:
859 self.addfile(filename, from_p2=True)
851 self.addfile(filename, from_p2=True)
860 elif not (p1_tracked or p2_tracked) and wc_tracked:
852 elif not (p1_tracked or p2_tracked) and wc_tracked:
861 self.addfile(
853 self.addfile(
862 filename, added=True, possibly_dirty=possibly_dirty
854 filename, added=True, possibly_dirty=possibly_dirty
863 )
855 )
864 elif (p1_tracked or p2_tracked) and not wc_tracked:
856 elif (p1_tracked or p2_tracked) and not wc_tracked:
865 # XXX might be merged and removed ?
857 # XXX might be merged and removed ?
866 self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
858 self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
867 elif clean_p2 and wc_tracked:
859 elif clean_p2 and wc_tracked:
868 if p1_tracked or self.get(filename) is not None:
860 if p1_tracked or self.get(filename) is not None:
869 # XXX the `self.get` call is catching some case in
861 # XXX the `self.get` call is catching some case in
870 # `test-merge-remove.t` where the file is tracked in p1, but the
862 # `test-merge-remove.t` where the file is tracked in p1, but the
871 # p1_tracked argument is False.
863 # p1_tracked argument is False.
872 #
864 #
873 # In addition, this seems to be a case where the file is marked
865 # In addition, this seems to be a case where the file is marked
874 # as merged without actually being the result of a merge
866 # as merged without actually being the result of a merge
875 # action. So things are not ideal here.
867 # action. So things are not ideal here.
876 self.addfile(filename, merged=True)
868 self.addfile(filename, merged=True)
877 else:
869 else:
878 self.addfile(filename, from_p2=True)
870 self.addfile(filename, from_p2=True)
879 elif not p1_tracked and p2_tracked and wc_tracked:
871 elif not p1_tracked and p2_tracked and wc_tracked:
880 self.addfile(
872 self.addfile(
881 filename, from_p2=True, possibly_dirty=possibly_dirty
873 filename, from_p2=True, possibly_dirty=possibly_dirty
882 )
874 )
883 elif possibly_dirty:
875 elif possibly_dirty:
884 self.addfile(filename, possibly_dirty=possibly_dirty)
876 self.addfile(filename, possibly_dirty=possibly_dirty)
885 elif wc_tracked:
877 elif wc_tracked:
886 # this is a "normal" file
878 # this is a "normal" file
887 if parentfiledata is None:
879 if parentfiledata is None:
888 msg = b'failed to pass parentfiledata for a normal file: %s'
880 msg = b'failed to pass parentfiledata for a normal file: %s'
889 msg %= filename
881 msg %= filename
890 raise error.ProgrammingError(msg)
882 raise error.ProgrammingError(msg)
891 mode, size, mtime = parentfiledata
883 mode, size, mtime = parentfiledata
892 self.addfile(filename, mode=mode, size=size, mtime=mtime)
884 self.addfile(filename, mode=mode, size=size, mtime=mtime)
893 else:
885 else:
894 assert False, 'unreachable'
886 assert False, 'unreachable'
895
887
896 ### Legacy methods we need to get rid of
888 ### Legacy methods we need to get rid of
897
889
898 def addfile(
890 def addfile(
899 self,
891 self,
900 f,
892 f,
901 mode=0,
893 mode=0,
902 size=None,
894 size=None,
903 mtime=None,
895 mtime=None,
904 added=False,
896 added=False,
905 merged=False,
897 merged=False,
906 from_p2=False,
898 from_p2=False,
907 possibly_dirty=False,
899 possibly_dirty=False,
908 ):
900 ):
909 if added:
901 if added:
910 assert not possibly_dirty
902 assert not possibly_dirty
911 assert not from_p2
903 assert not from_p2
912 item = DirstateItem.new_added()
904 item = DirstateItem.new_added()
913 elif merged:
905 elif merged:
914 assert not possibly_dirty
906 assert not possibly_dirty
915 assert not from_p2
907 assert not from_p2
916 item = DirstateItem.new_merged()
908 item = DirstateItem.new_merged()
917 elif from_p2:
909 elif from_p2:
918 assert not possibly_dirty
910 assert not possibly_dirty
919 item = DirstateItem.new_from_p2()
911 item = DirstateItem.new_from_p2()
920 elif possibly_dirty:
912 elif possibly_dirty:
921 item = DirstateItem.new_possibly_dirty()
913 item = DirstateItem.new_possibly_dirty()
922 else:
914 else:
923 assert size is not None
915 assert size is not None
924 assert mtime is not None
916 assert mtime is not None
925 size = size & rangemask
917 size = size & rangemask
926 mtime = mtime & rangemask
918 mtime = mtime & rangemask
927 item = DirstateItem.new_normal(mode, size, mtime)
919 item = DirstateItem.new_normal(mode, size, mtime)
928 self._map.addfile(f, item)
920 self._map.addfile(f, item)
929 if added:
921 if added:
930 self.copymap.pop(f, None)
922 self.copymap.pop(f, None)
931
923
932 def removefile(self, *args, **kwargs):
924 def removefile(self, *args, **kwargs):
933 return self._map.removefile(*args, **kwargs)
925 return self._map.removefile(*args, **kwargs)
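For reference, the `_refresh_entry` hooks that the shared `set_clean` and `set_tracked` rely on are implemented per backend in the diff above; abridged:

    # pure-Python map: the DirstateItem stored in the dict was mutated in
    # place, so only entries that became fully untracked need to be dropped
    def _refresh_entry(self, f, entry):
        if not entry.any_tracked:
            self._map.pop(f, None)

    # Rust-backed map: push the updated item back into the Rust-side map
    def _refresh_entry(self, f, entry):
        if not entry.any_tracked:
            self._map.drop_item_and_copy_source(f)
        else:
            self._map.addfile(f, entry)

Keeping the entry mutation in the common class and the persistence details in this hook is what allows `set_clean` to live in `_dirstatemapcommon` without duplicating storage-specific code.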