##// END OF EJS Templates
dirstate: infer the 'n' state from `possibly_dirty`...
marmoute -
r48317:80617f3c default
parent child Browse files
Show More
@@ -1,1439 +1,1439 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 dirstatetuple = parsers.dirstatetuple
48 dirstatetuple = parsers.dirstatetuple
49
49
50
50
51 class repocache(filecache):
51 class repocache(filecache):
52 """filecache for files in .hg/"""
52 """filecache for files in .hg/"""
53
53
54 def join(self, obj, fname):
54 def join(self, obj, fname):
55 return obj._opener.join(fname)
55 return obj._opener.join(fname)
56
56
57
57
58 class rootcache(filecache):
58 class rootcache(filecache):
59 """filecache for files in the repository root"""
59 """filecache for files in the repository root"""
60
60
61 def join(self, obj, fname):
61 def join(self, obj, fname):
62 return obj._join(fname)
62 return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
75 @interfaceutil.implementer(intdirstate.idirstate)
75 @interfaceutil.implementer(intdirstate.idirstate)
76 class dirstate(object):
76 class dirstate(object):
77 def __init__(
77 def __init__(
78 self,
78 self,
79 opener,
79 opener,
80 ui,
80 ui,
81 root,
81 root,
82 validate,
82 validate,
83 sparsematchfn,
83 sparsematchfn,
84 nodeconstants,
84 nodeconstants,
85 use_dirstate_v2,
85 use_dirstate_v2,
86 ):
86 ):
87 """Create a new dirstate object.
87 """Create a new dirstate object.
88
88
89 opener is an open()-like callable that can be used to open the
89 opener is an open()-like callable that can be used to open the
90 dirstate file; root is the root of the directory tracked by
90 dirstate file; root is the root of the directory tracked by
91 the dirstate.
91 the dirstate.
92 """
92 """
93 self._use_dirstate_v2 = use_dirstate_v2
93 self._use_dirstate_v2 = use_dirstate_v2
94 self._nodeconstants = nodeconstants
94 self._nodeconstants = nodeconstants
95 self._opener = opener
95 self._opener = opener
96 self._validate = validate
96 self._validate = validate
97 self._root = root
97 self._root = root
98 self._sparsematchfn = sparsematchfn
98 self._sparsematchfn = sparsematchfn
99 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
99 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
100 # UNC path pointing to root share (issue4557)
100 # UNC path pointing to root share (issue4557)
101 self._rootdir = pathutil.normasprefix(root)
101 self._rootdir = pathutil.normasprefix(root)
102 self._dirty = False
102 self._dirty = False
103 self._lastnormaltime = 0
103 self._lastnormaltime = 0
104 self._ui = ui
104 self._ui = ui
105 self._filecache = {}
105 self._filecache = {}
106 self._parentwriters = 0
106 self._parentwriters = 0
107 self._filename = b'dirstate'
107 self._filename = b'dirstate'
108 self._pendingfilename = b'%s.pending' % self._filename
108 self._pendingfilename = b'%s.pending' % self._filename
109 self._plchangecallbacks = {}
109 self._plchangecallbacks = {}
110 self._origpl = None
110 self._origpl = None
111 self._updatedfiles = set()
111 self._updatedfiles = set()
112 self._mapcls = dirstatemap.dirstatemap
112 self._mapcls = dirstatemap.dirstatemap
113 # Access and cache cwd early, so we don't access it for the first time
113 # Access and cache cwd early, so we don't access it for the first time
114 # after a working-copy update caused it to not exist (accessing it then
114 # after a working-copy update caused it to not exist (accessing it then
115 # raises an exception).
115 # raises an exception).
116 self._cwd
116 self._cwd
117
117
118 def prefetch_parents(self):
118 def prefetch_parents(self):
119 """make sure the parents are loaded
119 """make sure the parents are loaded
120
120
121 Used to avoid a race condition.
121 Used to avoid a race condition.
122 """
122 """
123 self._pl
123 self._pl
124
124
125 @contextlib.contextmanager
125 @contextlib.contextmanager
126 def parentchange(self):
126 def parentchange(self):
127 """Context manager for handling dirstate parents.
127 """Context manager for handling dirstate parents.
128
128
129 If an exception occurs in the scope of the context manager,
129 If an exception occurs in the scope of the context manager,
130 the incoherent dirstate won't be written when wlock is
130 the incoherent dirstate won't be written when wlock is
131 released.
131 released.
132 """
132 """
133 self._parentwriters += 1
133 self._parentwriters += 1
134 yield
134 yield
135 # Typically we want the "undo" step of a context manager in a
135 # Typically we want the "undo" step of a context manager in a
136 # finally block so it happens even when an exception
136 # finally block so it happens even when an exception
137 # occurs. In this case, however, we only want to decrement
137 # occurs. In this case, however, we only want to decrement
138 # parentwriters if the code in the with statement exits
138 # parentwriters if the code in the with statement exits
139 # normally, so we don't have a try/finally here on purpose.
139 # normally, so we don't have a try/finally here on purpose.
140 self._parentwriters -= 1
140 self._parentwriters -= 1
141
141
142 def pendingparentchange(self):
142 def pendingparentchange(self):
143 """Returns true if the dirstate is in the middle of a set of changes
143 """Returns true if the dirstate is in the middle of a set of changes
144 that modify the dirstate parent.
144 that modify the dirstate parent.
145 """
145 """
146 return self._parentwriters > 0
146 return self._parentwriters > 0
147
147
148 @propertycache
148 @propertycache
149 def _map(self):
149 def _map(self):
150 """Return the dirstate contents (see documentation for dirstatemap)."""
150 """Return the dirstate contents (see documentation for dirstatemap)."""
151 self._map = self._mapcls(
151 self._map = self._mapcls(
152 self._ui,
152 self._ui,
153 self._opener,
153 self._opener,
154 self._root,
154 self._root,
155 self._nodeconstants,
155 self._nodeconstants,
156 self._use_dirstate_v2,
156 self._use_dirstate_v2,
157 )
157 )
158 return self._map
158 return self._map
159
159
160 @property
160 @property
161 def _sparsematcher(self):
161 def _sparsematcher(self):
162 """The matcher for the sparse checkout.
162 """The matcher for the sparse checkout.
163
163
164 The working directory may not include every file from a manifest. The
164 The working directory may not include every file from a manifest. The
165 matcher obtained by this property will match a path if it is to be
165 matcher obtained by this property will match a path if it is to be
166 included in the working directory.
166 included in the working directory.
167 """
167 """
168 # TODO there is potential to cache this property. For now, the matcher
168 # TODO there is potential to cache this property. For now, the matcher
169 # is resolved on every access. (But the called function does use a
169 # is resolved on every access. (But the called function does use a
170 # cache to keep the lookup fast.)
170 # cache to keep the lookup fast.)
171 return self._sparsematchfn()
171 return self._sparsematchfn()
172
172
173 @repocache(b'branch')
173 @repocache(b'branch')
174 def _branch(self):
174 def _branch(self):
175 try:
175 try:
176 return self._opener.read(b"branch").strip() or b"default"
176 return self._opener.read(b"branch").strip() or b"default"
177 except IOError as inst:
177 except IOError as inst:
178 if inst.errno != errno.ENOENT:
178 if inst.errno != errno.ENOENT:
179 raise
179 raise
180 return b"default"
180 return b"default"
181
181
182 @property
182 @property
183 def _pl(self):
183 def _pl(self):
184 return self._map.parents()
184 return self._map.parents()
185
185
186 def hasdir(self, d):
186 def hasdir(self, d):
187 return self._map.hastrackeddir(d)
187 return self._map.hastrackeddir(d)
188
188
189 @rootcache(b'.hgignore')
189 @rootcache(b'.hgignore')
190 def _ignore(self):
190 def _ignore(self):
191 files = self._ignorefiles()
191 files = self._ignorefiles()
192 if not files:
192 if not files:
193 return matchmod.never()
193 return matchmod.never()
194
194
195 pats = [b'include:%s' % f for f in files]
195 pats = [b'include:%s' % f for f in files]
196 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
196 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
197
197
198 @propertycache
198 @propertycache
199 def _slash(self):
199 def _slash(self):
200 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
200 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
201
201
202 @propertycache
202 @propertycache
203 def _checklink(self):
203 def _checklink(self):
204 return util.checklink(self._root)
204 return util.checklink(self._root)
205
205
206 @propertycache
206 @propertycache
207 def _checkexec(self):
207 def _checkexec(self):
208 return bool(util.checkexec(self._root))
208 return bool(util.checkexec(self._root))
209
209
210 @propertycache
210 @propertycache
211 def _checkcase(self):
211 def _checkcase(self):
212 return not util.fscasesensitive(self._join(b'.hg'))
212 return not util.fscasesensitive(self._join(b'.hg'))
213
213
214 def _join(self, f):
214 def _join(self, f):
215 # much faster than os.path.join()
215 # much faster than os.path.join()
216 # it's safe because f is always a relative path
216 # it's safe because f is always a relative path
217 return self._rootdir + f
217 return self._rootdir + f
218
218
219 def flagfunc(self, buildfallback):
219 def flagfunc(self, buildfallback):
220 if self._checklink and self._checkexec:
220 if self._checklink and self._checkexec:
221
221
222 def f(x):
222 def f(x):
223 try:
223 try:
224 st = os.lstat(self._join(x))
224 st = os.lstat(self._join(x))
225 if util.statislink(st):
225 if util.statislink(st):
226 return b'l'
226 return b'l'
227 if util.statisexec(st):
227 if util.statisexec(st):
228 return b'x'
228 return b'x'
229 except OSError:
229 except OSError:
230 pass
230 pass
231 return b''
231 return b''
232
232
233 return f
233 return f
234
234
235 fallback = buildfallback()
235 fallback = buildfallback()
236 if self._checklink:
236 if self._checklink:
237
237
238 def f(x):
238 def f(x):
239 if os.path.islink(self._join(x)):
239 if os.path.islink(self._join(x)):
240 return b'l'
240 return b'l'
241 if b'x' in fallback(x):
241 if b'x' in fallback(x):
242 return b'x'
242 return b'x'
243 return b''
243 return b''
244
244
245 return f
245 return f
246 if self._checkexec:
246 if self._checkexec:
247
247
248 def f(x):
248 def f(x):
249 if b'l' in fallback(x):
249 if b'l' in fallback(x):
250 return b'l'
250 return b'l'
251 if util.isexec(self._join(x)):
251 if util.isexec(self._join(x)):
252 return b'x'
252 return b'x'
253 return b''
253 return b''
254
254
255 return f
255 return f
256 else:
256 else:
257 return fallback
257 return fallback
258
258
259 @propertycache
259 @propertycache
260 def _cwd(self):
260 def _cwd(self):
261 # internal config: ui.forcecwd
261 # internal config: ui.forcecwd
262 forcecwd = self._ui.config(b'ui', b'forcecwd')
262 forcecwd = self._ui.config(b'ui', b'forcecwd')
263 if forcecwd:
263 if forcecwd:
264 return forcecwd
264 return forcecwd
265 return encoding.getcwd()
265 return encoding.getcwd()
266
266
267 def getcwd(self):
267 def getcwd(self):
268 """Return the path from which a canonical path is calculated.
268 """Return the path from which a canonical path is calculated.
269
269
270 This path should be used to resolve file patterns or to convert
270 This path should be used to resolve file patterns or to convert
271 canonical paths back to file paths for display. It shouldn't be
271 canonical paths back to file paths for display. It shouldn't be
272 used to get real file paths. Use vfs functions instead.
272 used to get real file paths. Use vfs functions instead.
273 """
273 """
274 cwd = self._cwd
274 cwd = self._cwd
275 if cwd == self._root:
275 if cwd == self._root:
276 return b''
276 return b''
277 # self._root ends with a path separator if self._root is '/' or 'C:\'
277 # self._root ends with a path separator if self._root is '/' or 'C:\'
278 rootsep = self._root
278 rootsep = self._root
279 if not util.endswithsep(rootsep):
279 if not util.endswithsep(rootsep):
280 rootsep += pycompat.ossep
280 rootsep += pycompat.ossep
281 if cwd.startswith(rootsep):
281 if cwd.startswith(rootsep):
282 return cwd[len(rootsep) :]
282 return cwd[len(rootsep) :]
283 else:
283 else:
284 # we're outside the repo. return an absolute path.
284 # we're outside the repo. return an absolute path.
285 return cwd
285 return cwd
286
286
287 def pathto(self, f, cwd=None):
287 def pathto(self, f, cwd=None):
288 if cwd is None:
288 if cwd is None:
289 cwd = self.getcwd()
289 cwd = self.getcwd()
290 path = util.pathto(self._root, cwd, f)
290 path = util.pathto(self._root, cwd, f)
291 if self._slash:
291 if self._slash:
292 return util.pconvert(path)
292 return util.pconvert(path)
293 return path
293 return path
294
294
295 def __getitem__(self, key):
295 def __getitem__(self, key):
296 """Return the current state of key (a filename) in the dirstate.
296 """Return the current state of key (a filename) in the dirstate.
297
297
298 States are:
298 States are:
299 n normal
299 n normal
300 m needs merging
300 m needs merging
301 r marked for removal
301 r marked for removal
302 a marked for addition
302 a marked for addition
303 ? not tracked
303 ? not tracked
304
304
305 XXX The "state" is a bit obscure to be in the "public" API. we should
305 XXX The "state" is a bit obscure to be in the "public" API. we should
306 consider migrating all user of this to going through the dirstate entry
306 consider migrating all user of this to going through the dirstate entry
307 instead.
307 instead.
308 """
308 """
309 entry = self._map.get(key)
309 entry = self._map.get(key)
310 if entry is not None:
310 if entry is not None:
311 return entry.state
311 return entry.state
312 return b'?'
312 return b'?'
313
313
314 def __contains__(self, key):
314 def __contains__(self, key):
315 return key in self._map
315 return key in self._map
316
316
317 def __iter__(self):
317 def __iter__(self):
318 return iter(sorted(self._map))
318 return iter(sorted(self._map))
319
319
320 def items(self):
320 def items(self):
321 return pycompat.iteritems(self._map)
321 return pycompat.iteritems(self._map)
322
322
323 iteritems = items
323 iteritems = items
324
324
325 def directories(self):
325 def directories(self):
326 return self._map.directories()
326 return self._map.directories()
327
327
328 def parents(self):
328 def parents(self):
329 return [self._validate(p) for p in self._pl]
329 return [self._validate(p) for p in self._pl]
330
330
331 def p1(self):
331 def p1(self):
332 return self._validate(self._pl[0])
332 return self._validate(self._pl[0])
333
333
334 def p2(self):
334 def p2(self):
335 return self._validate(self._pl[1])
335 return self._validate(self._pl[1])
336
336
337 @property
337 @property
338 def in_merge(self):
338 def in_merge(self):
339 """True if a merge is in progress"""
339 """True if a merge is in progress"""
340 return self._pl[1] != self._nodeconstants.nullid
340 return self._pl[1] != self._nodeconstants.nullid
341
341
342 def branch(self):
342 def branch(self):
343 return encoding.tolocal(self._branch)
343 return encoding.tolocal(self._branch)
344
344
345 def setparents(self, p1, p2=None):
345 def setparents(self, p1, p2=None):
346 """Set dirstate parents to p1 and p2.
346 """Set dirstate parents to p1 and p2.
347
347
348 When moving from two parents to one, "merged" entries a
348 When moving from two parents to one, "merged" entries a
349 adjusted to normal and previous copy records discarded and
349 adjusted to normal and previous copy records discarded and
350 returned by the call.
350 returned by the call.
351
351
352 See localrepo.setparents()
352 See localrepo.setparents()
353 """
353 """
354 if p2 is None:
354 if p2 is None:
355 p2 = self._nodeconstants.nullid
355 p2 = self._nodeconstants.nullid
356 if self._parentwriters == 0:
356 if self._parentwriters == 0:
357 raise ValueError(
357 raise ValueError(
358 b"cannot set dirstate parent outside of "
358 b"cannot set dirstate parent outside of "
359 b"dirstate.parentchange context manager"
359 b"dirstate.parentchange context manager"
360 )
360 )
361
361
362 self._dirty = True
362 self._dirty = True
363 oldp2 = self._pl[1]
363 oldp2 = self._pl[1]
364 if self._origpl is None:
364 if self._origpl is None:
365 self._origpl = self._pl
365 self._origpl = self._pl
366 self._map.setparents(p1, p2)
366 self._map.setparents(p1, p2)
367 copies = {}
367 copies = {}
368 if (
368 if (
369 oldp2 != self._nodeconstants.nullid
369 oldp2 != self._nodeconstants.nullid
370 and p2 == self._nodeconstants.nullid
370 and p2 == self._nodeconstants.nullid
371 ):
371 ):
372 candidatefiles = self._map.non_normal_or_other_parent_paths()
372 candidatefiles = self._map.non_normal_or_other_parent_paths()
373
373
374 for f in candidatefiles:
374 for f in candidatefiles:
375 s = self._map.get(f)
375 s = self._map.get(f)
376 if s is None:
376 if s is None:
377 continue
377 continue
378
378
379 # Discard "merged" markers when moving away from a merge state
379 # Discard "merged" markers when moving away from a merge state
380 if s.merged:
380 if s.merged:
381 source = self._map.copymap.get(f)
381 source = self._map.copymap.get(f)
382 if source:
382 if source:
383 copies[f] = source
383 copies[f] = source
384 self.normallookup(f)
384 self.normallookup(f)
385 # Also fix up otherparent markers
385 # Also fix up otherparent markers
386 elif s.from_p2:
386 elif s.from_p2:
387 source = self._map.copymap.get(f)
387 source = self._map.copymap.get(f)
388 if source:
388 if source:
389 copies[f] = source
389 copies[f] = source
390 self.add(f)
390 self.add(f)
391 return copies
391 return copies
392
392
393 def setbranch(self, branch):
393 def setbranch(self, branch):
394 self.__class__._branch.set(self, encoding.fromlocal(branch))
394 self.__class__._branch.set(self, encoding.fromlocal(branch))
395 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
395 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
396 try:
396 try:
397 f.write(self._branch + b'\n')
397 f.write(self._branch + b'\n')
398 f.close()
398 f.close()
399
399
400 # make sure filecache has the correct stat info for _branch after
400 # make sure filecache has the correct stat info for _branch after
401 # replacing the underlying file
401 # replacing the underlying file
402 ce = self._filecache[b'_branch']
402 ce = self._filecache[b'_branch']
403 if ce:
403 if ce:
404 ce.refresh()
404 ce.refresh()
405 except: # re-raises
405 except: # re-raises
406 f.discard()
406 f.discard()
407 raise
407 raise
408
408
409 def invalidate(self):
409 def invalidate(self):
410 """Causes the next access to reread the dirstate.
410 """Causes the next access to reread the dirstate.
411
411
412 This is different from localrepo.invalidatedirstate() because it always
412 This is different from localrepo.invalidatedirstate() because it always
413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
414 check whether the dirstate has changed before rereading it."""
414 check whether the dirstate has changed before rereading it."""
415
415
416 for a in ("_map", "_branch", "_ignore"):
416 for a in ("_map", "_branch", "_ignore"):
417 if a in self.__dict__:
417 if a in self.__dict__:
418 delattr(self, a)
418 delattr(self, a)
419 self._lastnormaltime = 0
419 self._lastnormaltime = 0
420 self._dirty = False
420 self._dirty = False
421 self._updatedfiles.clear()
421 self._updatedfiles.clear()
422 self._parentwriters = 0
422 self._parentwriters = 0
423 self._origpl = None
423 self._origpl = None
424
424
425 def copy(self, source, dest):
425 def copy(self, source, dest):
426 """Mark dest as a copy of source. Unmark dest if source is None."""
426 """Mark dest as a copy of source. Unmark dest if source is None."""
427 if source == dest:
427 if source == dest:
428 return
428 return
429 self._dirty = True
429 self._dirty = True
430 if source is not None:
430 if source is not None:
431 self._map.copymap[dest] = source
431 self._map.copymap[dest] = source
432 self._updatedfiles.add(source)
432 self._updatedfiles.add(source)
433 self._updatedfiles.add(dest)
433 self._updatedfiles.add(dest)
434 elif self._map.copymap.pop(dest, None):
434 elif self._map.copymap.pop(dest, None):
435 self._updatedfiles.add(dest)
435 self._updatedfiles.add(dest)
436
436
437 def copied(self, file):
437 def copied(self, file):
438 return self._map.copymap.get(file, None)
438 return self._map.copymap.get(file, None)
439
439
440 def copies(self):
440 def copies(self):
441 return self._map.copymap
441 return self._map.copymap
442
442
443 def _addpath(
443 def _addpath(
444 self,
444 self,
445 f,
445 f,
446 state=None,
446 state=None,
447 mode=0,
447 mode=0,
448 size=None,
448 size=None,
449 mtime=None,
449 mtime=None,
450 added=False,
450 added=False,
451 merged=False,
451 merged=False,
452 from_p2=False,
452 from_p2=False,
453 possibly_dirty=False,
453 possibly_dirty=False,
454 ):
454 ):
455 entry = self._map.get(f)
455 entry = self._map.get(f)
456 if added or entry is not None and entry.removed:
456 if added or entry is not None and entry.removed:
457 scmutil.checkfilename(f)
457 scmutil.checkfilename(f)
458 if self._map.hastrackeddir(f):
458 if self._map.hastrackeddir(f):
459 msg = _(b'directory %r already in dirstate')
459 msg = _(b'directory %r already in dirstate')
460 msg %= pycompat.bytestr(f)
460 msg %= pycompat.bytestr(f)
461 raise error.Abort(msg)
461 raise error.Abort(msg)
462 # shadows
462 # shadows
463 for d in pathutil.finddirs(f):
463 for d in pathutil.finddirs(f):
464 if self._map.hastrackeddir(d):
464 if self._map.hastrackeddir(d):
465 break
465 break
466 entry = self._map.get(d)
466 entry = self._map.get(d)
467 if entry is not None and not entry.removed:
467 if entry is not None and not entry.removed:
468 msg = _(b'file %r in dirstate clashes with %r')
468 msg = _(b'file %r in dirstate clashes with %r')
469 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
469 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
470 raise error.Abort(msg)
470 raise error.Abort(msg)
471 self._dirty = True
471 self._dirty = True
472 self._updatedfiles.add(f)
472 self._updatedfiles.add(f)
473 self._map.addfile(
473 self._map.addfile(
474 f,
474 f,
475 state=state,
475 state=state,
476 mode=mode,
476 mode=mode,
477 size=size,
477 size=size,
478 mtime=mtime,
478 mtime=mtime,
479 added=added,
479 added=added,
480 merged=merged,
480 merged=merged,
481 from_p2=from_p2,
481 from_p2=from_p2,
482 possibly_dirty=possibly_dirty,
482 possibly_dirty=possibly_dirty,
483 )
483 )
484
484
485 def normal(self, f, parentfiledata=None):
485 def normal(self, f, parentfiledata=None):
486 """Mark a file normal and clean.
486 """Mark a file normal and clean.
487
487
488 parentfiledata: (mode, size, mtime) of the clean file
488 parentfiledata: (mode, size, mtime) of the clean file
489
489
490 parentfiledata should be computed from memory (for mode,
490 parentfiledata should be computed from memory (for mode,
491 size), as or close as possible from the point where we
491 size), as or close as possible from the point where we
492 determined the file was clean, to limit the risk of the
492 determined the file was clean, to limit the risk of the
493 file having been changed by an external process between the
493 file having been changed by an external process between the
494 moment where the file was determined to be clean and now."""
494 moment where the file was determined to be clean and now."""
495 if parentfiledata:
495 if parentfiledata:
496 (mode, size, mtime) = parentfiledata
496 (mode, size, mtime) = parentfiledata
497 else:
497 else:
498 s = os.lstat(self._join(f))
498 s = os.lstat(self._join(f))
499 mode = s.st_mode
499 mode = s.st_mode
500 size = s.st_size
500 size = s.st_size
501 mtime = s[stat.ST_MTIME]
501 mtime = s[stat.ST_MTIME]
502 self._addpath(f, b'n', mode, size, mtime)
502 self._addpath(f, b'n', mode, size, mtime)
503 self._map.copymap.pop(f, None)
503 self._map.copymap.pop(f, None)
504 if f in self._map.nonnormalset:
504 if f in self._map.nonnormalset:
505 self._map.nonnormalset.remove(f)
505 self._map.nonnormalset.remove(f)
506 if mtime > self._lastnormaltime:
506 if mtime > self._lastnormaltime:
507 # Remember the most recent modification timeslot for status(),
507 # Remember the most recent modification timeslot for status(),
508 # to make sure we won't miss future size-preserving file content
508 # to make sure we won't miss future size-preserving file content
509 # modifications that happen within the same timeslot.
509 # modifications that happen within the same timeslot.
510 self._lastnormaltime = mtime
510 self._lastnormaltime = mtime
511
511
512 def normallookup(self, f):
512 def normallookup(self, f):
513 '''Mark a file normal, but possibly dirty.'''
513 '''Mark a file normal, but possibly dirty.'''
514 if self.in_merge:
514 if self.in_merge:
515 # if there is a merge going on and the file was either
515 # if there is a merge going on and the file was either
516 # "merged" or coming from other parent (-2) before
516 # "merged" or coming from other parent (-2) before
517 # being removed, restore that state.
517 # being removed, restore that state.
518 entry = self._map.get(f)
518 entry = self._map.get(f)
519 if entry is not None:
519 if entry is not None:
520 # XXX this should probably be dealt with a a lower level
520 # XXX this should probably be dealt with a a lower level
521 # (see `merged_removed` and `from_p2_removed`)
521 # (see `merged_removed` and `from_p2_removed`)
522 if entry.merged_removed or entry.from_p2_removed:
522 if entry.merged_removed or entry.from_p2_removed:
523 source = self._map.copymap.get(f)
523 source = self._map.copymap.get(f)
524 if entry.merged_removed:
524 if entry.merged_removed:
525 self.merge(f)
525 self.merge(f)
526 elif entry.from_p2_removed:
526 elif entry.from_p2_removed:
527 self.otherparent(f)
527 self.otherparent(f)
528 if source is not None:
528 if source is not None:
529 self.copy(source, f)
529 self.copy(source, f)
530 return
530 return
531 elif entry.merged or entry.from_p2:
531 elif entry.merged or entry.from_p2:
532 return
532 return
533 self._addpath(f, b'n', 0, possibly_dirty=True)
533 self._addpath(f, possibly_dirty=True)
534 self._map.copymap.pop(f, None)
534 self._map.copymap.pop(f, None)
535
535
536 def otherparent(self, f):
536 def otherparent(self, f):
537 '''Mark as coming from the other parent, always dirty.'''
537 '''Mark as coming from the other parent, always dirty.'''
538 if not self.in_merge:
538 if not self.in_merge:
539 msg = _(b"setting %r to other parent only allowed in merges") % f
539 msg = _(b"setting %r to other parent only allowed in merges") % f
540 raise error.Abort(msg)
540 raise error.Abort(msg)
541 if f in self and self[f] == b'n':
541 if f in self and self[f] == b'n':
542 # merge-like
542 # merge-like
543 self._addpath(f, merged=True)
543 self._addpath(f, merged=True)
544 else:
544 else:
545 # add-like
545 # add-like
546 self._addpath(f, b'n', 0, from_p2=True)
546 self._addpath(f, b'n', 0, from_p2=True)
547 self._map.copymap.pop(f, None)
547 self._map.copymap.pop(f, None)
548
548
549 def add(self, f):
549 def add(self, f):
550 '''Mark a file added.'''
550 '''Mark a file added.'''
551 self._addpath(f, added=True)
551 self._addpath(f, added=True)
552 self._map.copymap.pop(f, None)
552 self._map.copymap.pop(f, None)
553
553
554 def remove(self, f):
554 def remove(self, f):
555 '''Mark a file removed.'''
555 '''Mark a file removed.'''
556 self._dirty = True
556 self._dirty = True
557 self._updatedfiles.add(f)
557 self._updatedfiles.add(f)
558 self._map.removefile(f, in_merge=self.in_merge)
558 self._map.removefile(f, in_merge=self.in_merge)
559
559
560 def merge(self, f):
560 def merge(self, f):
561 '''Mark a file merged.'''
561 '''Mark a file merged.'''
562 if not self.in_merge:
562 if not self.in_merge:
563 return self.normallookup(f)
563 return self.normallookup(f)
564 return self.otherparent(f)
564 return self.otherparent(f)
565
565
566 def drop(self, f):
566 def drop(self, f):
567 '''Drop a file from the dirstate'''
567 '''Drop a file from the dirstate'''
568 oldstate = self[f]
568 oldstate = self[f]
569 if self._map.dropfile(f, oldstate):
569 if self._map.dropfile(f, oldstate):
570 self._dirty = True
570 self._dirty = True
571 self._updatedfiles.add(f)
571 self._updatedfiles.add(f)
572 self._map.copymap.pop(f, None)
572 self._map.copymap.pop(f, None)
573
573
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Discover the filesystem case of ``path`` and return the folded form.

        ``normed`` is the case-normalized form of ``path``.  ``exists`` may be
        passed by the caller to avoid a filesystem check, or None to have it
        determined here.  The result is cached in ``storemap`` (keyed by
        ``normed``) only when the path actually exists on disk.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                # normalize the existing directory part and keep the
                # (missing) final component as given
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that exist on disk
            storemap[normed] = folded

        return folded
599
599
600 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
600 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
601 normed = util.normcase(path)
601 normed = util.normcase(path)
602 folded = self._map.filefoldmap.get(normed, None)
602 folded = self._map.filefoldmap.get(normed, None)
603 if folded is None:
603 if folded is None:
604 if isknown:
604 if isknown:
605 folded = path
605 folded = path
606 else:
606 else:
607 folded = self._discoverpath(
607 folded = self._discoverpath(
608 path, normed, ignoremissing, exists, self._map.filefoldmap
608 path, normed, ignoremissing, exists, self._map.filefoldmap
609 )
609 )
610 return folded
610 return folded
611
611
612 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
612 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
613 normed = util.normcase(path)
613 normed = util.normcase(path)
614 folded = self._map.filefoldmap.get(normed, None)
614 folded = self._map.filefoldmap.get(normed, None)
615 if folded is None:
615 if folded is None:
616 folded = self._map.dirfoldmap.get(normed, None)
616 folded = self._map.dirfoldmap.get(normed, None)
617 if folded is None:
617 if folded is None:
618 if isknown:
618 if isknown:
619 folded = path
619 folded = path
620 else:
620 else:
621 # store discovered result in dirfoldmap so that future
621 # store discovered result in dirfoldmap so that future
622 # normalizefile calls don't start matching directories
622 # normalizefile calls don't start matching directories
623 folded = self._discoverpath(
623 folded = self._discoverpath(
624 path, normed, ignoremissing, exists, self._map.dirfoldmap
624 path, normed, ignoremissing, exists, self._map.dirfoldmap
625 )
625 )
626 return folded
626 return folded
627
627
628 def normalize(self, path, isknown=False, ignoremissing=False):
628 def normalize(self, path, isknown=False, ignoremissing=False):
629 """
629 """
630 normalize the case of a pathname when on a casefolding filesystem
630 normalize the case of a pathname when on a casefolding filesystem
631
631
632 isknown specifies whether the filename came from walking the
632 isknown specifies whether the filename came from walking the
633 disk, to avoid extra filesystem access.
633 disk, to avoid extra filesystem access.
634
634
635 If ignoremissing is True, missing path are returned
635 If ignoremissing is True, missing path are returned
636 unchanged. Otherwise, we try harder to normalize possibly
636 unchanged. Otherwise, we try harder to normalize possibly
637 existing path components.
637 existing path components.
638
638
639 The normalized case is determined based on the following precedence:
639 The normalized case is determined based on the following precedence:
640
640
641 - version of name already stored in the dirstate
641 - version of name already stored in the dirstate
642 - version of name stored on disk
642 - version of name stored on disk
643 - version provided via command arguments
643 - version provided via command arguments
644 """
644 """
645
645
646 if self._checkcase:
646 if self._checkcase:
647 return self._normalize(path, isknown, ignoremissing)
647 return self._normalize(path, isknown, ignoremissing)
648 return path
648 return path
649
649
650 def clear(self):
650 def clear(self):
651 self._map.clear()
651 self._map.clear()
652 self._lastnormaltime = 0
652 self._lastnormaltime = 0
653 self._updatedfiles.clear()
653 self._updatedfiles.clear()
654 self._dirty = True
654 self._dirty = True
655
655
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to describe revision ``parent``.

        ``allfiles`` lists the files present in ``parent``.  When
        ``changedfiles`` is given, only those entries are refreshed
        (entries in ``changedfiles`` but not in ``allfiles`` are dropped);
        otherwise the whole dirstate is rebuilt.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # self.clear() resets _lastnormaltime; save it across the
            # clear so the pre-rebuild value is preserved
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            # remember the original parents so _writedirstate can notify
            # parent-change callbacks with the old value
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self.drop(f)

        self._dirty = True
689
689
690 def identity(self):
690 def identity(self):
691 """Return identity of dirstate itself to detect changing in storage
691 """Return identity of dirstate itself to detect changing in storage
692
692
693 If identity of previous dirstate is equal to this, writing
693 If identity of previous dirstate is equal to this, writing
694 changes based on the former dirstate out can keep consistency.
694 changes based on the former dirstate out can keep consistency.
695 """
695 """
696 return self._map.identity
696 return self._map.identity
697
697
    def write(self, tr):
        """Write the dirstate out to disk, or schedule the write on ``tr``.

        With a transaction, the actual serialization is deferred to a file
        generator that runs when the transaction is written; without one,
        the file is written immediately (atomically, with timestamp
        ambiguity checking).  No-op when nothing is dirty.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
729
729
730 def addparentchangecallback(self, category, callback):
730 def addparentchangecallback(self, category, callback):
731 """add a callback to be called when the wd parents are changed
731 """add a callback to be called when the wd parents are changed
732
732
733 Callback will be called with the following arguments:
733 Callback will be called with the following arguments:
734 dirstate, (oldp1, oldp2), (newp1, newp2)
734 dirstate, (oldp1, oldp2), (newp1, newp2)
735
735
736 Category is a unique identifier to allow overwriting an old callback
736 Category is a unique identifier to allow overwriting an old callback
737 with a newer callback.
737 with a newer callback.
738 """
738 """
739 self._plchangecallbacks[category] = callback
739 self._plchangecallbacks[category] = callback
740
740
    def _writedirstate(self, st):
        """Serialize the dirstate map into the open file object ``st``.

        Also fires registered parent-change callbacks (when the parents
        actually changed) and optionally delays the write so file mtimes
        cannot be ambiguous with 'now' (debug.dirstate.delaywrite).
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.state == b'n' and e[3] == now:
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
774
774
775 def _dirignore(self, f):
775 def _dirignore(self, f):
776 if self._ignore(f):
776 if self._ignore(f):
777 return True
777 return True
778 for p in pathutil.finddirs(f):
778 for p in pathutil.finddirs(f):
779 if self._ignore(p):
779 if self._ignore(p):
780 return True
780 return True
781 return False
781 return False
782
782
783 def _ignorefiles(self):
783 def _ignorefiles(self):
784 files = []
784 files = []
785 if os.path.exists(self._join(b'.hgignore')):
785 if os.path.exists(self._join(b'.hgignore')):
786 files.append(self._join(b'.hgignore'))
786 files.append(self._join(b'.hgignore'))
787 for name, path in self._ui.configitems(b"ui"):
787 for name, path in self._ui.configitems(b"ui"):
788 if name == b'ignore' or name.startswith(b'ignore.'):
788 if name == b'ignore' or name.startswith(b'ignore.'):
789 # we need to use os.path.join here rather than self._join
789 # we need to use os.path.join here rather than self._join
790 # because path is arbitrary and user-specified
790 # because path is arbitrary and user-specified
791 files.append(os.path.join(self._rootdir, util.expandpath(path)))
791 files.append(os.path.join(self._rootdir, util.expandpath(path)))
792 return files
792 return files
793
793
    def _ignorefileandline(self, f):
        """Return (file, lineno, line) of the first ignore rule matching ``f``.

        Walks the ignore files breadth-first, following ``subinclude``
        directives (re-queueing is limited via ``visited``), and returns
        the source file, line number and raw pattern line of the first
        pattern matching ``f``.  Returns (None, -1, b"") when no pattern
        matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced file for scanning instead of
                    # treating it as a pattern
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
815
815
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for unsupported file types
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently used attributes to locals for the loops below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # remove explicit file paths that fall inside one of the subrepos
        # (both lists are sorted, so this is a single merge-style pass)
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # subrepos and .hg act as sentinels mapped to None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group result paths by their case-normalized form
            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            # for ambiguous groups, keep stat data only for the on-disk case
            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
950
950
951 def walk(self, match, subrepos, unknown, ignored, full=True):
951 def walk(self, match, subrepos, unknown, ignored, full=True):
952 """
952 """
953 Walk recursively through the directory tree, finding all files
953 Walk recursively through the directory tree, finding all files
954 matched by match.
954 matched by match.
955
955
956 If full is False, maybe skip some known-clean files.
956 If full is False, maybe skip some known-clean files.
957
957
958 Return a dict mapping filename to stat-like object (either
958 Return a dict mapping filename to stat-like object (either
959 mercurial.osutil.stat instance or return value of os.stat()).
959 mercurial.osutil.stat instance or return value of os.stat()).
960
960
961 """
961 """
962 # full is a flag that extensions that hook into walk can use -- this
962 # full is a flag that extensions that hook into walk can use -- this
963 # implementation doesn't use it at all. This satisfies the contract
963 # implementation doesn't use it at all. This satisfies the contract
964 # because we only guarantee a "maybe".
964 # because we only guarantee a "maybe".
965
965
966 if ignored:
966 if ignored:
967 ignore = util.never
967 ignore = util.never
968 dirignore = util.never
968 dirignore = util.never
969 elif unknown:
969 elif unknown:
970 ignore = self._ignore
970 ignore = self._ignore
971 dirignore = self._dirignore
971 dirignore = self._dirignore
972 else:
972 else:
973 # if not unknown and not ignored, drop dir recursion and step 2
973 # if not unknown and not ignored, drop dir recursion and step 2
974 ignore = util.always
974 ignore = util.always
975 dirignore = util.always
975 dirignore = util.always
976
976
977 matchfn = match.matchfn
977 matchfn = match.matchfn
978 matchalways = match.always()
978 matchalways = match.always()
979 matchtdir = match.traversedir
979 matchtdir = match.traversedir
980 dmap = self._map
980 dmap = self._map
981 listdir = util.listdir
981 listdir = util.listdir
982 lstat = os.lstat
982 lstat = os.lstat
983 dirkind = stat.S_IFDIR
983 dirkind = stat.S_IFDIR
984 regkind = stat.S_IFREG
984 regkind = stat.S_IFREG
985 lnkkind = stat.S_IFLNK
985 lnkkind = stat.S_IFLNK
986 join = self._join
986 join = self._join
987
987
988 exact = skipstep3 = False
988 exact = skipstep3 = False
989 if match.isexact(): # match.exact
989 if match.isexact(): # match.exact
990 exact = True
990 exact = True
991 dirignore = util.always # skip step 2
991 dirignore = util.always # skip step 2
992 elif match.prefix(): # match.match, no patterns
992 elif match.prefix(): # match.match, no patterns
993 skipstep3 = True
993 skipstep3 = True
994
994
995 if not exact and self._checkcase:
995 if not exact and self._checkcase:
996 normalize = self._normalize
996 normalize = self._normalize
997 normalizefile = self._normalizefile
997 normalizefile = self._normalizefile
998 skipstep3 = False
998 skipstep3 = False
999 else:
999 else:
1000 normalize = self._normalize
1000 normalize = self._normalize
1001 normalizefile = None
1001 normalizefile = None
1002
1002
1003 # step 1: find all explicit files
1003 # step 1: find all explicit files
1004 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1004 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1005 if matchtdir:
1005 if matchtdir:
1006 for d in work:
1006 for d in work:
1007 matchtdir(d[0])
1007 matchtdir(d[0])
1008 for d in dirsnotfound:
1008 for d in dirsnotfound:
1009 matchtdir(d)
1009 matchtdir(d)
1010
1010
1011 skipstep3 = skipstep3 and not (work or dirsnotfound)
1011 skipstep3 = skipstep3 and not (work or dirsnotfound)
1012 work = [d for d in work if not dirignore(d[0])]
1012 work = [d for d in work if not dirignore(d[0])]
1013
1013
1014 # step 2: visit subdirectories
1014 # step 2: visit subdirectories
1015 def traverse(work, alreadynormed):
1015 def traverse(work, alreadynormed):
1016 wadd = work.append
1016 wadd = work.append
1017 while work:
1017 while work:
1018 tracing.counter('dirstate.walk work', len(work))
1018 tracing.counter('dirstate.walk work', len(work))
1019 nd = work.pop()
1019 nd = work.pop()
1020 visitentries = match.visitchildrenset(nd)
1020 visitentries = match.visitchildrenset(nd)
1021 if not visitentries:
1021 if not visitentries:
1022 continue
1022 continue
1023 if visitentries == b'this' or visitentries == b'all':
1023 if visitentries == b'this' or visitentries == b'all':
1024 visitentries = None
1024 visitentries = None
1025 skip = None
1025 skip = None
1026 if nd != b'':
1026 if nd != b'':
1027 skip = b'.hg'
1027 skip = b'.hg'
1028 try:
1028 try:
1029 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1029 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1030 entries = listdir(join(nd), stat=True, skip=skip)
1030 entries = listdir(join(nd), stat=True, skip=skip)
1031 except OSError as inst:
1031 except OSError as inst:
1032 if inst.errno in (errno.EACCES, errno.ENOENT):
1032 if inst.errno in (errno.EACCES, errno.ENOENT):
1033 match.bad(
1033 match.bad(
1034 self.pathto(nd), encoding.strtolocal(inst.strerror)
1034 self.pathto(nd), encoding.strtolocal(inst.strerror)
1035 )
1035 )
1036 continue
1036 continue
1037 raise
1037 raise
1038 for f, kind, st in entries:
1038 for f, kind, st in entries:
1039 # Some matchers may return files in the visitentries set,
1039 # Some matchers may return files in the visitentries set,
1040 # instead of 'this', if the matcher explicitly mentions them
1040 # instead of 'this', if the matcher explicitly mentions them
1041 # and is not an exactmatcher. This is acceptable; we do not
1041 # and is not an exactmatcher. This is acceptable; we do not
1042 # make any hard assumptions about file-or-directory below
1042 # make any hard assumptions about file-or-directory below
1043 # based on the presence of `f` in visitentries. If
1043 # based on the presence of `f` in visitentries. If
1044 # visitchildrenset returned a set, we can always skip the
1044 # visitchildrenset returned a set, we can always skip the
1045 # entries *not* in the set it provided regardless of whether
1045 # entries *not* in the set it provided regardless of whether
1046 # they're actually a file or a directory.
1046 # they're actually a file or a directory.
1047 if visitentries and f not in visitentries:
1047 if visitentries and f not in visitentries:
1048 continue
1048 continue
1049 if normalizefile:
1049 if normalizefile:
1050 # even though f might be a directory, we're only
1050 # even though f might be a directory, we're only
1051 # interested in comparing it to files currently in the
1051 # interested in comparing it to files currently in the
1052 # dmap -- therefore normalizefile is enough
1052 # dmap -- therefore normalizefile is enough
1053 nf = normalizefile(
1053 nf = normalizefile(
1054 nd and (nd + b"/" + f) or f, True, True
1054 nd and (nd + b"/" + f) or f, True, True
1055 )
1055 )
1056 else:
1056 else:
1057 nf = nd and (nd + b"/" + f) or f
1057 nf = nd and (nd + b"/" + f) or f
1058 if nf not in results:
1058 if nf not in results:
1059 if kind == dirkind:
1059 if kind == dirkind:
1060 if not ignore(nf):
1060 if not ignore(nf):
1061 if matchtdir:
1061 if matchtdir:
1062 matchtdir(nf)
1062 matchtdir(nf)
1063 wadd(nf)
1063 wadd(nf)
1064 if nf in dmap and (matchalways or matchfn(nf)):
1064 if nf in dmap and (matchalways or matchfn(nf)):
1065 results[nf] = None
1065 results[nf] = None
1066 elif kind == regkind or kind == lnkkind:
1066 elif kind == regkind or kind == lnkkind:
1067 if nf in dmap:
1067 if nf in dmap:
1068 if matchalways or matchfn(nf):
1068 if matchalways or matchfn(nf):
1069 results[nf] = st
1069 results[nf] = st
1070 elif (matchalways or matchfn(nf)) and not ignore(
1070 elif (matchalways or matchfn(nf)) and not ignore(
1071 nf
1071 nf
1072 ):
1072 ):
1073 # unknown file -- normalize if necessary
1073 # unknown file -- normalize if necessary
1074 if not alreadynormed:
1074 if not alreadynormed:
1075 nf = normalize(nf, False, True)
1075 nf = normalize(nf, False, True)
1076 results[nf] = st
1076 results[nf] = st
1077 elif nf in dmap and (matchalways or matchfn(nf)):
1077 elif nf in dmap and (matchalways or matchfn(nf)):
1078 results[nf] = None
1078 results[nf] = None
1079
1079
1080 for nd, d in work:
1080 for nd, d in work:
1081 # alreadynormed means that processwork doesn't have to do any
1081 # alreadynormed means that processwork doesn't have to do any
1082 # expensive directory normalization
1082 # expensive directory normalization
1083 alreadynormed = not normalize or nd == d
1083 alreadynormed = not normalize or nd == d
1084 traverse([d], alreadynormed)
1084 traverse([d], alreadynormed)
1085
1085
1086 for s in subrepos:
1086 for s in subrepos:
1087 del results[s]
1087 del results[s]
1088 del results[b'.hg']
1088 del results[b'.hg']
1089
1089
1090 # step 3: visit remaining files from dmap
1090 # step 3: visit remaining files from dmap
1091 if not skipstep3 and not exact:
1091 if not skipstep3 and not exact:
1092 # If a dmap file is not in results yet, it was either
1092 # If a dmap file is not in results yet, it was either
1093 # a) not matching matchfn b) ignored, c) missing, or d) under a
1093 # a) not matching matchfn b) ignored, c) missing, or d) under a
1094 # symlink directory.
1094 # symlink directory.
1095 if not results and matchalways:
1095 if not results and matchalways:
1096 visit = [f for f in dmap]
1096 visit = [f for f in dmap]
1097 else:
1097 else:
1098 visit = [f for f in dmap if f not in results and matchfn(f)]
1098 visit = [f for f in dmap if f not in results and matchfn(f)]
1099 visit.sort()
1099 visit.sort()
1100
1100
1101 if unknown:
1101 if unknown:
1102 # unknown == True means we walked all dirs under the roots
1102 # unknown == True means we walked all dirs under the roots
1103 # that wasn't ignored, and everything that matched was stat'ed
1103 # that wasn't ignored, and everything that matched was stat'ed
1104 # and is already in results.
1104 # and is already in results.
1105 # The rest must thus be ignored or under a symlink.
1105 # The rest must thus be ignored or under a symlink.
1106 audit_path = pathutil.pathauditor(self._root, cached=True)
1106 audit_path = pathutil.pathauditor(self._root, cached=True)
1107
1107
1108 for nf in iter(visit):
1108 for nf in iter(visit):
1109 # If a stat for the same file was already added with a
1109 # If a stat for the same file was already added with a
1110 # different case, don't add one for this, since that would
1110 # different case, don't add one for this, since that would
1111 # make it appear as if the file exists under both names
1111 # make it appear as if the file exists under both names
1112 # on disk.
1112 # on disk.
1113 if (
1113 if (
1114 normalizefile
1114 normalizefile
1115 and normalizefile(nf, True, True) in results
1115 and normalizefile(nf, True, True) in results
1116 ):
1116 ):
1117 results[nf] = None
1117 results[nf] = None
1118 # Report ignored items in the dmap as long as they are not
1118 # Report ignored items in the dmap as long as they are not
1119 # under a symlink directory.
1119 # under a symlink directory.
1120 elif audit_path.check(nf):
1120 elif audit_path.check(nf):
1121 try:
1121 try:
1122 results[nf] = lstat(join(nf))
1122 results[nf] = lstat(join(nf))
1123 # file was just ignored, no links, and exists
1123 # file was just ignored, no links, and exists
1124 except OSError:
1124 except OSError:
1125 # file doesn't exist
1125 # file doesn't exist
1126 results[nf] = None
1126 results[nf] = None
1127 else:
1127 else:
1128 # It's either missing or under a symlink directory
1128 # It's either missing or under a symlink directory
1129 # which we in this case report as missing
1129 # which we in this case report as missing
1130 results[nf] = None
1130 results[nf] = None
1131 else:
1131 else:
1132 # We may not have walked the full directory tree above,
1132 # We may not have walked the full directory tree above,
1133 # so stat and check everything we missed.
1133 # so stat and check everything we missed.
1134 iv = iter(visit)
1134 iv = iter(visit)
1135 for st in util.statfiles([join(i) for i in visit]):
1135 for st in util.statfiles([join(i) for i in visit]):
1136 results[next(iv)] = st
1136 results[next(iv)] = st
1137 return results
1137 return results
1138
1138
1139 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1139 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1140 # Force Rayon (Rust parallelism library) to respect the number of
1140 # Force Rayon (Rust parallelism library) to respect the number of
1141 # workers. This is a temporary workaround until Rust code knows
1141 # workers. This is a temporary workaround until Rust code knows
1142 # how to read the config file.
1142 # how to read the config file.
1143 numcpus = self._ui.configint(b"worker", b"numcpus")
1143 numcpus = self._ui.configint(b"worker", b"numcpus")
1144 if numcpus is not None:
1144 if numcpus is not None:
1145 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1145 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1146
1146
1147 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1147 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1148 if not workers_enabled:
1148 if not workers_enabled:
1149 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1149 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1150
1150
1151 (
1151 (
1152 lookup,
1152 lookup,
1153 modified,
1153 modified,
1154 added,
1154 added,
1155 removed,
1155 removed,
1156 deleted,
1156 deleted,
1157 clean,
1157 clean,
1158 ignored,
1158 ignored,
1159 unknown,
1159 unknown,
1160 warnings,
1160 warnings,
1161 bad,
1161 bad,
1162 traversed,
1162 traversed,
1163 dirty,
1163 dirty,
1164 ) = rustmod.status(
1164 ) = rustmod.status(
1165 self._map._rustmap,
1165 self._map._rustmap,
1166 matcher,
1166 matcher,
1167 self._rootdir,
1167 self._rootdir,
1168 self._ignorefiles(),
1168 self._ignorefiles(),
1169 self._checkexec,
1169 self._checkexec,
1170 self._lastnormaltime,
1170 self._lastnormaltime,
1171 bool(list_clean),
1171 bool(list_clean),
1172 bool(list_ignored),
1172 bool(list_ignored),
1173 bool(list_unknown),
1173 bool(list_unknown),
1174 bool(matcher.traversedir),
1174 bool(matcher.traversedir),
1175 )
1175 )
1176
1176
1177 self._dirty |= dirty
1177 self._dirty |= dirty
1178
1178
1179 if matcher.traversedir:
1179 if matcher.traversedir:
1180 for dir in traversed:
1180 for dir in traversed:
1181 matcher.traversedir(dir)
1181 matcher.traversedir(dir)
1182
1182
1183 if self._ui.warn:
1183 if self._ui.warn:
1184 for item in warnings:
1184 for item in warnings:
1185 if isinstance(item, tuple):
1185 if isinstance(item, tuple):
1186 file_path, syntax = item
1186 file_path, syntax = item
1187 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1187 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1188 file_path,
1188 file_path,
1189 syntax,
1189 syntax,
1190 )
1190 )
1191 self._ui.warn(msg)
1191 self._ui.warn(msg)
1192 else:
1192 else:
1193 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1193 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1194 self._ui.warn(
1194 self._ui.warn(
1195 msg
1195 msg
1196 % (
1196 % (
1197 pathutil.canonpath(
1197 pathutil.canonpath(
1198 self._rootdir, self._rootdir, item
1198 self._rootdir, self._rootdir, item
1199 ),
1199 ),
1200 b"No such file or directory",
1200 b"No such file or directory",
1201 )
1201 )
1202 )
1202 )
1203
1203
1204 for (fn, message) in bad:
1204 for (fn, message) in bad:
1205 matcher.bad(fn, encoding.strtolocal(message))
1205 matcher.bad(fn, encoding.strtolocal(message))
1206
1206
1207 status = scmutil.status(
1207 status = scmutil.status(
1208 modified=modified,
1208 modified=modified,
1209 added=added,
1209 added=added,
1210 removed=removed,
1210 removed=removed,
1211 deleted=deleted,
1211 deleted=deleted,
1212 unknown=unknown,
1212 unknown=unknown,
1213 ignored=ignored,
1213 ignored=ignored,
1214 clean=clean,
1214 clean=clean,
1215 )
1215 )
1216 return (lookup, status)
1216 return (lookup, status)
1217
1217
1218 def status(self, match, subrepos, ignored, clean, unknown):
1218 def status(self, match, subrepos, ignored, clean, unknown):
1219 """Determine the status of the working copy relative to the
1219 """Determine the status of the working copy relative to the
1220 dirstate and return a pair of (unsure, status), where status is of type
1220 dirstate and return a pair of (unsure, status), where status is of type
1221 scmutil.status and:
1221 scmutil.status and:
1222
1222
1223 unsure:
1223 unsure:
1224 files that might have been modified since the dirstate was
1224 files that might have been modified since the dirstate was
1225 written, but need to be read to be sure (size is the same
1225 written, but need to be read to be sure (size is the same
1226 but mtime differs)
1226 but mtime differs)
1227 status.modified:
1227 status.modified:
1228 files that have definitely been modified since the dirstate
1228 files that have definitely been modified since the dirstate
1229 was written (different size or mode)
1229 was written (different size or mode)
1230 status.clean:
1230 status.clean:
1231 files that have definitely not been modified since the
1231 files that have definitely not been modified since the
1232 dirstate was written
1232 dirstate was written
1233 """
1233 """
1234 listignored, listclean, listunknown = ignored, clean, unknown
1234 listignored, listclean, listunknown = ignored, clean, unknown
1235 lookup, modified, added, unknown, ignored = [], [], [], [], []
1235 lookup, modified, added, unknown, ignored = [], [], [], [], []
1236 removed, deleted, clean = [], [], []
1236 removed, deleted, clean = [], [], []
1237
1237
1238 dmap = self._map
1238 dmap = self._map
1239 dmap.preload()
1239 dmap.preload()
1240
1240
1241 use_rust = True
1241 use_rust = True
1242
1242
1243 allowed_matchers = (
1243 allowed_matchers = (
1244 matchmod.alwaysmatcher,
1244 matchmod.alwaysmatcher,
1245 matchmod.exactmatcher,
1245 matchmod.exactmatcher,
1246 matchmod.includematcher,
1246 matchmod.includematcher,
1247 )
1247 )
1248
1248
1249 if rustmod is None:
1249 if rustmod is None:
1250 use_rust = False
1250 use_rust = False
1251 elif self._checkcase:
1251 elif self._checkcase:
1252 # Case-insensitive filesystems are not handled yet
1252 # Case-insensitive filesystems are not handled yet
1253 use_rust = False
1253 use_rust = False
1254 elif subrepos:
1254 elif subrepos:
1255 use_rust = False
1255 use_rust = False
1256 elif sparse.enabled:
1256 elif sparse.enabled:
1257 use_rust = False
1257 use_rust = False
1258 elif not isinstance(match, allowed_matchers):
1258 elif not isinstance(match, allowed_matchers):
1259 # Some matchers have yet to be implemented
1259 # Some matchers have yet to be implemented
1260 use_rust = False
1260 use_rust = False
1261
1261
1262 if use_rust:
1262 if use_rust:
1263 try:
1263 try:
1264 return self._rust_status(
1264 return self._rust_status(
1265 match, listclean, listignored, listunknown
1265 match, listclean, listignored, listunknown
1266 )
1266 )
1267 except rustmod.FallbackError:
1267 except rustmod.FallbackError:
1268 pass
1268 pass
1269
1269
1270 def noop(f):
1270 def noop(f):
1271 pass
1271 pass
1272
1272
1273 dcontains = dmap.__contains__
1273 dcontains = dmap.__contains__
1274 dget = dmap.__getitem__
1274 dget = dmap.__getitem__
1275 ladd = lookup.append # aka "unsure"
1275 ladd = lookup.append # aka "unsure"
1276 madd = modified.append
1276 madd = modified.append
1277 aadd = added.append
1277 aadd = added.append
1278 uadd = unknown.append if listunknown else noop
1278 uadd = unknown.append if listunknown else noop
1279 iadd = ignored.append if listignored else noop
1279 iadd = ignored.append if listignored else noop
1280 radd = removed.append
1280 radd = removed.append
1281 dadd = deleted.append
1281 dadd = deleted.append
1282 cadd = clean.append if listclean else noop
1282 cadd = clean.append if listclean else noop
1283 mexact = match.exact
1283 mexact = match.exact
1284 dirignore = self._dirignore
1284 dirignore = self._dirignore
1285 checkexec = self._checkexec
1285 checkexec = self._checkexec
1286 copymap = self._map.copymap
1286 copymap = self._map.copymap
1287 lastnormaltime = self._lastnormaltime
1287 lastnormaltime = self._lastnormaltime
1288
1288
1289 # We need to do full walks when either
1289 # We need to do full walks when either
1290 # - we're listing all clean files, or
1290 # - we're listing all clean files, or
1291 # - match.traversedir does something, because match.traversedir should
1291 # - match.traversedir does something, because match.traversedir should
1292 # be called for every dir in the working dir
1292 # be called for every dir in the working dir
1293 full = listclean or match.traversedir is not None
1293 full = listclean or match.traversedir is not None
1294 for fn, st in pycompat.iteritems(
1294 for fn, st in pycompat.iteritems(
1295 self.walk(match, subrepos, listunknown, listignored, full=full)
1295 self.walk(match, subrepos, listunknown, listignored, full=full)
1296 ):
1296 ):
1297 if not dcontains(fn):
1297 if not dcontains(fn):
1298 if (listignored or mexact(fn)) and dirignore(fn):
1298 if (listignored or mexact(fn)) and dirignore(fn):
1299 if listignored:
1299 if listignored:
1300 iadd(fn)
1300 iadd(fn)
1301 else:
1301 else:
1302 uadd(fn)
1302 uadd(fn)
1303 continue
1303 continue
1304
1304
1305 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1305 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1306 # written like that for performance reasons. dmap[fn] is not a
1306 # written like that for performance reasons. dmap[fn] is not a
1307 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1307 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1308 # opcode has fast paths when the value to be unpacked is a tuple or
1308 # opcode has fast paths when the value to be unpacked is a tuple or
1309 # a list, but falls back to creating a full-fledged iterator in
1309 # a list, but falls back to creating a full-fledged iterator in
1310 # general. That is much slower than simply accessing and storing the
1310 # general. That is much slower than simply accessing and storing the
1311 # tuple members one by one.
1311 # tuple members one by one.
1312 t = dget(fn)
1312 t = dget(fn)
1313 state = t.state
1313 state = t.state
1314 mode = t[1]
1314 mode = t[1]
1315 size = t[2]
1315 size = t[2]
1316 time = t[3]
1316 time = t[3]
1317
1317
1318 if not st and state in b"nma":
1318 if not st and state in b"nma":
1319 dadd(fn)
1319 dadd(fn)
1320 elif state == b'n':
1320 elif state == b'n':
1321 if (
1321 if (
1322 size >= 0
1322 size >= 0
1323 and (
1323 and (
1324 (size != st.st_size and size != st.st_size & _rangemask)
1324 (size != st.st_size and size != st.st_size & _rangemask)
1325 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1325 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1326 )
1326 )
1327 or t.from_p2
1327 or t.from_p2
1328 or fn in copymap
1328 or fn in copymap
1329 ):
1329 ):
1330 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1330 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1331 # issue6456: Size returned may be longer due to
1331 # issue6456: Size returned may be longer due to
1332 # encryption on EXT-4 fscrypt, undecided.
1332 # encryption on EXT-4 fscrypt, undecided.
1333 ladd(fn)
1333 ladd(fn)
1334 else:
1334 else:
1335 madd(fn)
1335 madd(fn)
1336 elif (
1336 elif (
1337 time != st[stat.ST_MTIME]
1337 time != st[stat.ST_MTIME]
1338 and time != st[stat.ST_MTIME] & _rangemask
1338 and time != st[stat.ST_MTIME] & _rangemask
1339 ):
1339 ):
1340 ladd(fn)
1340 ladd(fn)
1341 elif st[stat.ST_MTIME] == lastnormaltime:
1341 elif st[stat.ST_MTIME] == lastnormaltime:
1342 # fn may have just been marked as normal and it may have
1342 # fn may have just been marked as normal and it may have
1343 # changed in the same second without changing its size.
1343 # changed in the same second without changing its size.
1344 # This can happen if we quickly do multiple commits.
1344 # This can happen if we quickly do multiple commits.
1345 # Force lookup, so we don't miss such a racy file change.
1345 # Force lookup, so we don't miss such a racy file change.
1346 ladd(fn)
1346 ladd(fn)
1347 elif listclean:
1347 elif listclean:
1348 cadd(fn)
1348 cadd(fn)
1349 elif t.merged:
1349 elif t.merged:
1350 madd(fn)
1350 madd(fn)
1351 elif t.added:
1351 elif t.added:
1352 aadd(fn)
1352 aadd(fn)
1353 elif t.removed:
1353 elif t.removed:
1354 radd(fn)
1354 radd(fn)
1355 status = scmutil.status(
1355 status = scmutil.status(
1356 modified, added, removed, deleted, unknown, ignored, clean
1356 modified, added, removed, deleted, unknown, ignored, clean
1357 )
1357 )
1358 return (lookup, status)
1358 return (lookup, status)
1359
1359
1360 def matches(self, match):
1360 def matches(self, match):
1361 """
1361 """
1362 return files in the dirstate (in whatever state) filtered by match
1362 return files in the dirstate (in whatever state) filtered by match
1363 """
1363 """
1364 dmap = self._map
1364 dmap = self._map
1365 if rustmod is not None:
1365 if rustmod is not None:
1366 dmap = self._map._rustmap
1366 dmap = self._map._rustmap
1367
1367
1368 if match.always():
1368 if match.always():
1369 return dmap.keys()
1369 return dmap.keys()
1370 files = match.files()
1370 files = match.files()
1371 if match.isexact():
1371 if match.isexact():
1372 # fast path -- filter the other way around, since typically files is
1372 # fast path -- filter the other way around, since typically files is
1373 # much smaller than dmap
1373 # much smaller than dmap
1374 return [f for f in files if f in dmap]
1374 return [f for f in files if f in dmap]
1375 if match.prefix() and all(fn in dmap for fn in files):
1375 if match.prefix() and all(fn in dmap for fn in files):
1376 # fast path -- all the values are known to be files, so just return
1376 # fast path -- all the values are known to be files, so just return
1377 # that
1377 # that
1378 return list(files)
1378 return list(files)
1379 return [f for f in dmap if match(f)]
1379 return [f for f in dmap if match(f)]
1380
1380
1381 def _actualfilename(self, tr):
1381 def _actualfilename(self, tr):
1382 if tr:
1382 if tr:
1383 return self._pendingfilename
1383 return self._pendingfilename
1384 else:
1384 else:
1385 return self._filename
1385 return self._filename
1386
1386
1387 def savebackup(self, tr, backupname):
1387 def savebackup(self, tr, backupname):
1388 '''Save current dirstate into backup file'''
1388 '''Save current dirstate into backup file'''
1389 filename = self._actualfilename(tr)
1389 filename = self._actualfilename(tr)
1390 assert backupname != filename
1390 assert backupname != filename
1391
1391
1392 # use '_writedirstate' instead of 'write' to write changes certainly,
1392 # use '_writedirstate' instead of 'write' to write changes certainly,
1393 # because the latter omits writing out if transaction is running.
1393 # because the latter omits writing out if transaction is running.
1394 # output file will be used to create backup of dirstate at this point.
1394 # output file will be used to create backup of dirstate at this point.
1395 if self._dirty or not self._opener.exists(filename):
1395 if self._dirty or not self._opener.exists(filename):
1396 self._writedirstate(
1396 self._writedirstate(
1397 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1397 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1398 )
1398 )
1399
1399
1400 if tr:
1400 if tr:
1401 # ensure that subsequent tr.writepending returns True for
1401 # ensure that subsequent tr.writepending returns True for
1402 # changes written out above, even if dirstate is never
1402 # changes written out above, even if dirstate is never
1403 # changed after this
1403 # changed after this
1404 tr.addfilegenerator(
1404 tr.addfilegenerator(
1405 b'dirstate',
1405 b'dirstate',
1406 (self._filename,),
1406 (self._filename,),
1407 self._writedirstate,
1407 self._writedirstate,
1408 location=b'plain',
1408 location=b'plain',
1409 )
1409 )
1410
1410
1411 # ensure that pending file written above is unlinked at
1411 # ensure that pending file written above is unlinked at
1412 # failure, even if tr.writepending isn't invoked until the
1412 # failure, even if tr.writepending isn't invoked until the
1413 # end of this transaction
1413 # end of this transaction
1414 tr.registertmp(filename, location=b'plain')
1414 tr.registertmp(filename, location=b'plain')
1415
1415
1416 self._opener.tryunlink(backupname)
1416 self._opener.tryunlink(backupname)
1417 # hardlink backup is okay because _writedirstate is always called
1417 # hardlink backup is okay because _writedirstate is always called
1418 # with an "atomictemp=True" file.
1418 # with an "atomictemp=True" file.
1419 util.copyfile(
1419 util.copyfile(
1420 self._opener.join(filename),
1420 self._opener.join(filename),
1421 self._opener.join(backupname),
1421 self._opener.join(backupname),
1422 hardlink=True,
1422 hardlink=True,
1423 )
1423 )
1424
1424
1425 def restorebackup(self, tr, backupname):
1425 def restorebackup(self, tr, backupname):
1426 '''Restore dirstate by backup file'''
1426 '''Restore dirstate by backup file'''
1427 # this "invalidate()" prevents "wlock.release()" from writing
1427 # this "invalidate()" prevents "wlock.release()" from writing
1428 # changes of dirstate out after restoring from backup file
1428 # changes of dirstate out after restoring from backup file
1429 self.invalidate()
1429 self.invalidate()
1430 filename = self._actualfilename(tr)
1430 filename = self._actualfilename(tr)
1431 o = self._opener
1431 o = self._opener
1432 if util.samefile(o.join(backupname), o.join(filename)):
1432 if util.samefile(o.join(backupname), o.join(filename)):
1433 o.unlink(backupname)
1433 o.unlink(backupname)
1434 else:
1434 else:
1435 o.rename(backupname, filename, checkambig=True)
1435 o.rename(backupname, filename, checkambig=True)
1436
1436
1437 def clearbackup(self, tr, backupname):
1437 def clearbackup(self, tr, backupname):
1438 '''Clear backup file'''
1438 '''Clear backup file'''
1439 self._opener.unlink(backupname)
1439 self._opener.unlink(backupname)
@@ -1,683 +1,684 b''
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 parsers = policy.importmod('parsers')
21 parsers = policy.importmod('parsers')
22 rustmod = policy.importrust('dirstate')
22 rustmod = policy.importrust('dirstate')
23
23
24 propertycache = util.propertycache
24 propertycache = util.propertycache
25
25
26 dirstatetuple = parsers.dirstatetuple
26 dirstatetuple = parsers.dirstatetuple
27
27
28
28
29 # a special value used internally for `size` if the file come from the other parent
29 # a special value used internally for `size` if the file come from the other parent
30 FROM_P2 = -2
30 FROM_P2 = -2
31
31
32 # a special value used internally for `size` if the file is modified/merged/added
32 # a special value used internally for `size` if the file is modified/merged/added
33 NONNORMAL = -1
33 NONNORMAL = -1
34
34
35 # a special value used internally for `time` if the time is ambigeous
35 # a special value used internally for `time` if the time is ambigeous
36 AMBIGUOUS_TIME = -1
36 AMBIGUOUS_TIME = -1
37
37
38 rangemask = 0x7FFFFFFF
38 rangemask = 0x7FFFFFFF
39
39
40
40
41 class dirstatemap(object):
41 class dirstatemap(object):
42 """Map encapsulating the dirstate's contents.
42 """Map encapsulating the dirstate's contents.
43
43
44 The dirstate contains the following state:
44 The dirstate contains the following state:
45
45
46 - `identity` is the identity of the dirstate file, which can be used to
46 - `identity` is the identity of the dirstate file, which can be used to
47 detect when changes have occurred to the dirstate file.
47 detect when changes have occurred to the dirstate file.
48
48
49 - `parents` is a pair containing the parents of the working copy. The
49 - `parents` is a pair containing the parents of the working copy. The
50 parents are updated by calling `setparents`.
50 parents are updated by calling `setparents`.
51
51
52 - the state map maps filenames to tuples of (state, mode, size, mtime),
52 - the state map maps filenames to tuples of (state, mode, size, mtime),
53 where state is a single character representing 'normal', 'added',
53 where state is a single character representing 'normal', 'added',
54 'removed', or 'merged'. It is read by treating the dirstate as a
54 'removed', or 'merged'. It is read by treating the dirstate as a
55 dict. File state is updated by calling the `addfile`, `removefile` and
55 dict. File state is updated by calling the `addfile`, `removefile` and
56 `dropfile` methods.
56 `dropfile` methods.
57
57
58 - `copymap` maps destination filenames to their source filename.
58 - `copymap` maps destination filenames to their source filename.
59
59
60 The dirstate also provides the following views onto the state:
60 The dirstate also provides the following views onto the state:
61
61
62 - `nonnormalset` is a set of the filenames that have state other
62 - `nonnormalset` is a set of the filenames that have state other
63 than 'normal', or are normal but have an mtime of -1 ('normallookup').
63 than 'normal', or are normal but have an mtime of -1 ('normallookup').
64
64
65 - `otherparentset` is a set of the filenames that are marked as coming
65 - `otherparentset` is a set of the filenames that are marked as coming
66 from the second parent when the dirstate is currently being merged.
66 from the second parent when the dirstate is currently being merged.
67
67
68 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
68 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
69 form that they appear as in the dirstate.
69 form that they appear as in the dirstate.
70
70
71 - `dirfoldmap` is a dict mapping normalized directory names to the
71 - `dirfoldmap` is a dict mapping normalized directory names to the
72 denormalized form that they appear as in the dirstate.
72 denormalized form that they appear as in the dirstate.
73 """
73 """
74
74
75 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
75 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
76 self._ui = ui
76 self._ui = ui
77 self._opener = opener
77 self._opener = opener
78 self._root = root
78 self._root = root
79 self._filename = b'dirstate'
79 self._filename = b'dirstate'
80 self._nodelen = 20
80 self._nodelen = 20
81 self._nodeconstants = nodeconstants
81 self._nodeconstants = nodeconstants
82 assert (
82 assert (
83 not use_dirstate_v2
83 not use_dirstate_v2
84 ), "should have detected unsupported requirement"
84 ), "should have detected unsupported requirement"
85
85
86 self._parents = None
86 self._parents = None
87 self._dirtyparents = False
87 self._dirtyparents = False
88
88
89 # for consistent view between _pl() and _read() invocations
89 # for consistent view between _pl() and _read() invocations
90 self._pendingmode = None
90 self._pendingmode = None
91
91
92 @propertycache
92 @propertycache
93 def _map(self):
93 def _map(self):
94 self._map = {}
94 self._map = {}
95 self.read()
95 self.read()
96 return self._map
96 return self._map
97
97
98 @propertycache
98 @propertycache
99 def copymap(self):
99 def copymap(self):
100 self.copymap = {}
100 self.copymap = {}
101 self._map
101 self._map
102 return self.copymap
102 return self.copymap
103
103
104 def directories(self):
104 def directories(self):
105 # Rust / dirstate-v2 only
105 # Rust / dirstate-v2 only
106 return []
106 return []
107
107
108 def clear(self):
108 def clear(self):
109 self._map.clear()
109 self._map.clear()
110 self.copymap.clear()
110 self.copymap.clear()
111 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
111 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
112 util.clearcachedproperty(self, b"_dirs")
112 util.clearcachedproperty(self, b"_dirs")
113 util.clearcachedproperty(self, b"_alldirs")
113 util.clearcachedproperty(self, b"_alldirs")
114 util.clearcachedproperty(self, b"filefoldmap")
114 util.clearcachedproperty(self, b"filefoldmap")
115 util.clearcachedproperty(self, b"dirfoldmap")
115 util.clearcachedproperty(self, b"dirfoldmap")
116 util.clearcachedproperty(self, b"nonnormalset")
116 util.clearcachedproperty(self, b"nonnormalset")
117 util.clearcachedproperty(self, b"otherparentset")
117 util.clearcachedproperty(self, b"otherparentset")
118
118
119 def items(self):
119 def items(self):
120 return pycompat.iteritems(self._map)
120 return pycompat.iteritems(self._map)
121
121
122 # forward for python2,3 compat
122 # forward for python2,3 compat
123 iteritems = items
123 iteritems = items
124
124
125 def __len__(self):
125 def __len__(self):
126 return len(self._map)
126 return len(self._map)
127
127
128 def __iter__(self):
128 def __iter__(self):
129 return iter(self._map)
129 return iter(self._map)
130
130
131 def get(self, key, default=None):
131 def get(self, key, default=None):
132 return self._map.get(key, default)
132 return self._map.get(key, default)
133
133
134 def __contains__(self, key):
134 def __contains__(self, key):
135 return key in self._map
135 return key in self._map
136
136
137 def __getitem__(self, key):
137 def __getitem__(self, key):
138 return self._map[key]
138 return self._map[key]
139
139
140 def keys(self):
140 def keys(self):
141 return self._map.keys()
141 return self._map.keys()
142
142
143 def preload(self):
143 def preload(self):
144 """Loads the underlying data, if it's not already loaded"""
144 """Loads the underlying data, if it's not already loaded"""
145 self._map
145 self._map
146
146
147 def addfile(
147 def addfile(
148 self,
148 self,
149 f,
149 f,
150 state=None,
150 state=None,
151 mode=0,
151 mode=0,
152 size=None,
152 size=None,
153 mtime=None,
153 mtime=None,
154 added=False,
154 added=False,
155 merged=False,
155 merged=False,
156 from_p2=False,
156 from_p2=False,
157 possibly_dirty=False,
157 possibly_dirty=False,
158 ):
158 ):
159 """Add a tracked file to the dirstate."""
159 """Add a tracked file to the dirstate."""
160 if added:
160 if added:
161 assert not merged
161 assert not merged
162 assert not possibly_dirty
162 assert not possibly_dirty
163 assert not from_p2
163 assert not from_p2
164 state = b'a'
164 state = b'a'
165 size = NONNORMAL
165 size = NONNORMAL
166 mtime = AMBIGUOUS_TIME
166 mtime = AMBIGUOUS_TIME
167 elif merged:
167 elif merged:
168 assert not possibly_dirty
168 assert not possibly_dirty
169 assert not from_p2
169 assert not from_p2
170 state = b'm'
170 state = b'm'
171 size = FROM_P2
171 size = FROM_P2
172 mtime = AMBIGUOUS_TIME
172 mtime = AMBIGUOUS_TIME
173 elif from_p2:
173 elif from_p2:
174 assert not possibly_dirty
174 assert not possibly_dirty
175 size = FROM_P2
175 size = FROM_P2
176 mtime = AMBIGUOUS_TIME
176 mtime = AMBIGUOUS_TIME
177 elif possibly_dirty:
177 elif possibly_dirty:
178 state = b'n'
178 size = NONNORMAL
179 size = NONNORMAL
179 mtime = AMBIGUOUS_TIME
180 mtime = AMBIGUOUS_TIME
180 else:
181 else:
181 assert state != b'a'
182 assert state != b'a'
182 assert size != FROM_P2
183 assert size != FROM_P2
183 assert size != NONNORMAL
184 assert size != NONNORMAL
184 size = size & rangemask
185 size = size & rangemask
185 mtime = mtime & rangemask
186 mtime = mtime & rangemask
186 assert state is not None
187 assert state is not None
187 assert size is not None
188 assert size is not None
188 assert mtime is not None
189 assert mtime is not None
189 old_entry = self.get(f)
190 old_entry = self.get(f)
190 if (
191 if (
191 old_entry is None or old_entry.removed
192 old_entry is None or old_entry.removed
192 ) and "_dirs" in self.__dict__:
193 ) and "_dirs" in self.__dict__:
193 self._dirs.addpath(f)
194 self._dirs.addpath(f)
194 if old_entry is None and "_alldirs" in self.__dict__:
195 if old_entry is None and "_alldirs" in self.__dict__:
195 self._alldirs.addpath(f)
196 self._alldirs.addpath(f)
196 self._map[f] = dirstatetuple(state, mode, size, mtime)
197 self._map[f] = dirstatetuple(state, mode, size, mtime)
197 if state != b'n' or mtime == AMBIGUOUS_TIME:
198 if state != b'n' or mtime == AMBIGUOUS_TIME:
198 self.nonnormalset.add(f)
199 self.nonnormalset.add(f)
199 if size == FROM_P2:
200 if size == FROM_P2:
200 self.otherparentset.add(f)
201 self.otherparentset.add(f)
201
202
202 def removefile(self, f, in_merge=False):
203 def removefile(self, f, in_merge=False):
203 """
204 """
204 Mark a file as removed in the dirstate.
205 Mark a file as removed in the dirstate.
205
206
206 The `size` parameter is used to store sentinel values that indicate
207 The `size` parameter is used to store sentinel values that indicate
207 the file's previous state. In the future, we should refactor this
208 the file's previous state. In the future, we should refactor this
208 to be more explicit about what that state is.
209 to be more explicit about what that state is.
209 """
210 """
210 entry = self.get(f)
211 entry = self.get(f)
211 size = 0
212 size = 0
212 if in_merge:
213 if in_merge:
213 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
214 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
214 # during a merge. So I (marmoute) am not sure we need the
215 # during a merge. So I (marmoute) am not sure we need the
215 # conditionnal at all. Adding double checking this with assert
216 # conditionnal at all. Adding double checking this with assert
216 # would be nice.
217 # would be nice.
217 if entry is not None:
218 if entry is not None:
218 # backup the previous state
219 # backup the previous state
219 if entry.merged: # merge
220 if entry.merged: # merge
220 size = NONNORMAL
221 size = NONNORMAL
221 elif entry[0] == b'n' and entry.from_p2:
222 elif entry[0] == b'n' and entry.from_p2:
222 size = FROM_P2
223 size = FROM_P2
223 self.otherparentset.add(f)
224 self.otherparentset.add(f)
224 if size == 0:
225 if size == 0:
225 self.copymap.pop(f, None)
226 self.copymap.pop(f, None)
226
227
227 if entry is not None and entry[0] != b'r' and "_dirs" in self.__dict__:
228 if entry is not None and entry[0] != b'r' and "_dirs" in self.__dict__:
228 self._dirs.delpath(f)
229 self._dirs.delpath(f)
229 if entry is None and "_alldirs" in self.__dict__:
230 if entry is None and "_alldirs" in self.__dict__:
230 self._alldirs.addpath(f)
231 self._alldirs.addpath(f)
231 if "filefoldmap" in self.__dict__:
232 if "filefoldmap" in self.__dict__:
232 normed = util.normcase(f)
233 normed = util.normcase(f)
233 self.filefoldmap.pop(normed, None)
234 self.filefoldmap.pop(normed, None)
234 self._map[f] = dirstatetuple(b'r', 0, size, 0)
235 self._map[f] = dirstatetuple(b'r', 0, size, 0)
235 self.nonnormalset.add(f)
236 self.nonnormalset.add(f)
236
237
237 def dropfile(self, f, oldstate):
238 def dropfile(self, f, oldstate):
238 """
239 """
239 Remove a file from the dirstate. Returns True if the file was
240 Remove a file from the dirstate. Returns True if the file was
240 previously recorded.
241 previously recorded.
241 """
242 """
242 exists = self._map.pop(f, None) is not None
243 exists = self._map.pop(f, None) is not None
243 if exists:
244 if exists:
244 if oldstate != b"r" and "_dirs" in self.__dict__:
245 if oldstate != b"r" and "_dirs" in self.__dict__:
245 self._dirs.delpath(f)
246 self._dirs.delpath(f)
246 if "_alldirs" in self.__dict__:
247 if "_alldirs" in self.__dict__:
247 self._alldirs.delpath(f)
248 self._alldirs.delpath(f)
248 if "filefoldmap" in self.__dict__:
249 if "filefoldmap" in self.__dict__:
249 normed = util.normcase(f)
250 normed = util.normcase(f)
250 self.filefoldmap.pop(normed, None)
251 self.filefoldmap.pop(normed, None)
251 self.nonnormalset.discard(f)
252 self.nonnormalset.discard(f)
252 return exists
253 return exists
253
254
254 def clearambiguoustimes(self, files, now):
255 def clearambiguoustimes(self, files, now):
255 for f in files:
256 for f in files:
256 e = self.get(f)
257 e = self.get(f)
257 if e is not None and e[0] == b'n' and e[3] == now:
258 if e is not None and e[0] == b'n' and e[3] == now:
258 self._map[f] = dirstatetuple(e[0], e[1], e[2], AMBIGUOUS_TIME)
259 self._map[f] = dirstatetuple(e[0], e[1], e[2], AMBIGUOUS_TIME)
259 self.nonnormalset.add(f)
260 self.nonnormalset.add(f)
260
261
261 def nonnormalentries(self):
262 def nonnormalentries(self):
262 '''Compute the nonnormal dirstate entries from the dmap'''
263 '''Compute the nonnormal dirstate entries from the dmap'''
263 try:
264 try:
264 return parsers.nonnormalotherparententries(self._map)
265 return parsers.nonnormalotherparententries(self._map)
265 except AttributeError:
266 except AttributeError:
266 nonnorm = set()
267 nonnorm = set()
267 otherparent = set()
268 otherparent = set()
268 for fname, e in pycompat.iteritems(self._map):
269 for fname, e in pycompat.iteritems(self._map):
269 if e[0] != b'n' or e[3] == AMBIGUOUS_TIME:
270 if e[0] != b'n' or e[3] == AMBIGUOUS_TIME:
270 nonnorm.add(fname)
271 nonnorm.add(fname)
271 if e[0] == b'n' and e[2] == FROM_P2:
272 if e[0] == b'n' and e[2] == FROM_P2:
272 otherparent.add(fname)
273 otherparent.add(fname)
273 return nonnorm, otherparent
274 return nonnorm, otherparent
274
275
275 @propertycache
276 @propertycache
276 def filefoldmap(self):
277 def filefoldmap(self):
277 """Returns a dictionary mapping normalized case paths to their
278 """Returns a dictionary mapping normalized case paths to their
278 non-normalized versions.
279 non-normalized versions.
279 """
280 """
280 try:
281 try:
281 makefilefoldmap = parsers.make_file_foldmap
282 makefilefoldmap = parsers.make_file_foldmap
282 except AttributeError:
283 except AttributeError:
283 pass
284 pass
284 else:
285 else:
285 return makefilefoldmap(
286 return makefilefoldmap(
286 self._map, util.normcasespec, util.normcasefallback
287 self._map, util.normcasespec, util.normcasefallback
287 )
288 )
288
289
289 f = {}
290 f = {}
290 normcase = util.normcase
291 normcase = util.normcase
291 for name, s in pycompat.iteritems(self._map):
292 for name, s in pycompat.iteritems(self._map):
292 if s[0] != b'r':
293 if s[0] != b'r':
293 f[normcase(name)] = name
294 f[normcase(name)] = name
294 f[b'.'] = b'.' # prevents useless util.fspath() invocation
295 f[b'.'] = b'.' # prevents useless util.fspath() invocation
295 return f
296 return f
296
297
297 def hastrackeddir(self, d):
298 def hastrackeddir(self, d):
298 """
299 """
299 Returns True if the dirstate contains a tracked (not removed) file
300 Returns True if the dirstate contains a tracked (not removed) file
300 in this directory.
301 in this directory.
301 """
302 """
302 return d in self._dirs
303 return d in self._dirs
303
304
304 def hasdir(self, d):
305 def hasdir(self, d):
305 """
306 """
306 Returns True if the dirstate contains a file (tracked or removed)
307 Returns True if the dirstate contains a file (tracked or removed)
307 in this directory.
308 in this directory.
308 """
309 """
309 return d in self._alldirs
310 return d in self._alldirs
310
311
311 @propertycache
312 @propertycache
312 def _dirs(self):
313 def _dirs(self):
313 return pathutil.dirs(self._map, b'r')
314 return pathutil.dirs(self._map, b'r')
314
315
315 @propertycache
316 @propertycache
316 def _alldirs(self):
317 def _alldirs(self):
317 return pathutil.dirs(self._map)
318 return pathutil.dirs(self._map)
318
319
319 def _opendirstatefile(self):
320 def _opendirstatefile(self):
320 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
321 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
321 if self._pendingmode is not None and self._pendingmode != mode:
322 if self._pendingmode is not None and self._pendingmode != mode:
322 fp.close()
323 fp.close()
323 raise error.Abort(
324 raise error.Abort(
324 _(b'working directory state may be changed parallelly')
325 _(b'working directory state may be changed parallelly')
325 )
326 )
326 self._pendingmode = mode
327 self._pendingmode = mode
327 return fp
328 return fp
328
329
329 def parents(self):
330 def parents(self):
330 if not self._parents:
331 if not self._parents:
331 try:
332 try:
332 fp = self._opendirstatefile()
333 fp = self._opendirstatefile()
333 st = fp.read(2 * self._nodelen)
334 st = fp.read(2 * self._nodelen)
334 fp.close()
335 fp.close()
335 except IOError as err:
336 except IOError as err:
336 if err.errno != errno.ENOENT:
337 if err.errno != errno.ENOENT:
337 raise
338 raise
338 # File doesn't exist, so the current state is empty
339 # File doesn't exist, so the current state is empty
339 st = b''
340 st = b''
340
341
341 l = len(st)
342 l = len(st)
342 if l == self._nodelen * 2:
343 if l == self._nodelen * 2:
343 self._parents = (
344 self._parents = (
344 st[: self._nodelen],
345 st[: self._nodelen],
345 st[self._nodelen : 2 * self._nodelen],
346 st[self._nodelen : 2 * self._nodelen],
346 )
347 )
347 elif l == 0:
348 elif l == 0:
348 self._parents = (
349 self._parents = (
349 self._nodeconstants.nullid,
350 self._nodeconstants.nullid,
350 self._nodeconstants.nullid,
351 self._nodeconstants.nullid,
351 )
352 )
352 else:
353 else:
353 raise error.Abort(
354 raise error.Abort(
354 _(b'working directory state appears damaged!')
355 _(b'working directory state appears damaged!')
355 )
356 )
356
357
357 return self._parents
358 return self._parents
358
359
359 def setparents(self, p1, p2):
360 def setparents(self, p1, p2):
360 self._parents = (p1, p2)
361 self._parents = (p1, p2)
361 self._dirtyparents = True
362 self._dirtyparents = True
362
363
363 def read(self):
364 def read(self):
364 # ignore HG_PENDING because identity is used only for writing
365 # ignore HG_PENDING because identity is used only for writing
365 self.identity = util.filestat.frompath(
366 self.identity = util.filestat.frompath(
366 self._opener.join(self._filename)
367 self._opener.join(self._filename)
367 )
368 )
368
369
369 try:
370 try:
370 fp = self._opendirstatefile()
371 fp = self._opendirstatefile()
371 try:
372 try:
372 st = fp.read()
373 st = fp.read()
373 finally:
374 finally:
374 fp.close()
375 fp.close()
375 except IOError as err:
376 except IOError as err:
376 if err.errno != errno.ENOENT:
377 if err.errno != errno.ENOENT:
377 raise
378 raise
378 return
379 return
379 if not st:
380 if not st:
380 return
381 return
381
382
382 if util.safehasattr(parsers, b'dict_new_presized'):
383 if util.safehasattr(parsers, b'dict_new_presized'):
383 # Make an estimate of the number of files in the dirstate based on
384 # Make an estimate of the number of files in the dirstate based on
384 # its size. This trades wasting some memory for avoiding costly
385 # its size. This trades wasting some memory for avoiding costly
385 # resizes. Each entry have a prefix of 17 bytes followed by one or
386 # resizes. Each entry have a prefix of 17 bytes followed by one or
386 # two path names. Studies on various large-scale real-world repositories
387 # two path names. Studies on various large-scale real-world repositories
387 # found 54 bytes a reasonable upper limit for the average path names.
388 # found 54 bytes a reasonable upper limit for the average path names.
388 # Copy entries are ignored for the sake of this estimate.
389 # Copy entries are ignored for the sake of this estimate.
389 self._map = parsers.dict_new_presized(len(st) // 71)
390 self._map = parsers.dict_new_presized(len(st) // 71)
390
391
391 # Python's garbage collector triggers a GC each time a certain number
392 # Python's garbage collector triggers a GC each time a certain number
392 # of container objects (the number being defined by
393 # of container objects (the number being defined by
393 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
394 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
394 # for each file in the dirstate. The C version then immediately marks
395 # for each file in the dirstate. The C version then immediately marks
395 # them as not to be tracked by the collector. However, this has no
396 # them as not to be tracked by the collector. However, this has no
396 # effect on when GCs are triggered, only on what objects the GC looks
397 # effect on when GCs are triggered, only on what objects the GC looks
397 # into. This means that O(number of files) GCs are unavoidable.
398 # into. This means that O(number of files) GCs are unavoidable.
398 # Depending on when in the process's lifetime the dirstate is parsed,
399 # Depending on when in the process's lifetime the dirstate is parsed,
399 # this can get very expensive. As a workaround, disable GC while
400 # this can get very expensive. As a workaround, disable GC while
400 # parsing the dirstate.
401 # parsing the dirstate.
401 #
402 #
402 # (we cannot decorate the function directly since it is in a C module)
403 # (we cannot decorate the function directly since it is in a C module)
403 parse_dirstate = util.nogc(parsers.parse_dirstate)
404 parse_dirstate = util.nogc(parsers.parse_dirstate)
404 p = parse_dirstate(self._map, self.copymap, st)
405 p = parse_dirstate(self._map, self.copymap, st)
405 if not self._dirtyparents:
406 if not self._dirtyparents:
406 self.setparents(*p)
407 self.setparents(*p)
407
408
408 # Avoid excess attribute lookups by fast pathing certain checks
409 # Avoid excess attribute lookups by fast pathing certain checks
409 self.__contains__ = self._map.__contains__
410 self.__contains__ = self._map.__contains__
410 self.__getitem__ = self._map.__getitem__
411 self.__getitem__ = self._map.__getitem__
411 self.get = self._map.get
412 self.get = self._map.get
412
413
413 def write(self, st, now):
414 def write(self, st, now):
414 st.write(
415 st.write(
415 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
416 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
416 )
417 )
417 st.close()
418 st.close()
418 self._dirtyparents = False
419 self._dirtyparents = False
419 self.nonnormalset, self.otherparentset = self.nonnormalentries()
420 self.nonnormalset, self.otherparentset = self.nonnormalentries()
420
421
421 @propertycache
422 @propertycache
422 def nonnormalset(self):
423 def nonnormalset(self):
423 nonnorm, otherparents = self.nonnormalentries()
424 nonnorm, otherparents = self.nonnormalentries()
424 self.otherparentset = otherparents
425 self.otherparentset = otherparents
425 return nonnorm
426 return nonnorm
426
427
427 @propertycache
428 @propertycache
428 def otherparentset(self):
429 def otherparentset(self):
429 nonnorm, otherparents = self.nonnormalentries()
430 nonnorm, otherparents = self.nonnormalentries()
430 self.nonnormalset = nonnorm
431 self.nonnormalset = nonnorm
431 return otherparents
432 return otherparents
432
433
433 def non_normal_or_other_parent_paths(self):
434 def non_normal_or_other_parent_paths(self):
434 return self.nonnormalset.union(self.otherparentset)
435 return self.nonnormalset.union(self.otherparentset)
435
436
436 @propertycache
437 @propertycache
437 def identity(self):
438 def identity(self):
438 self._map
439 self._map
439 return self.identity
440 return self.identity
440
441
441 @propertycache
442 @propertycache
442 def dirfoldmap(self):
443 def dirfoldmap(self):
443 f = {}
444 f = {}
444 normcase = util.normcase
445 normcase = util.normcase
445 for name in self._dirs:
446 for name in self._dirs:
446 f[normcase(name)] = name
447 f[normcase(name)] = name
447 return f
448 return f
448
449
449
450
450 if rustmod is not None:
451 if rustmod is not None:
451
452
452 class dirstatemap(object):
453 class dirstatemap(object):
453 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
454 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
454 self._use_dirstate_v2 = use_dirstate_v2
455 self._use_dirstate_v2 = use_dirstate_v2
455 self._nodeconstants = nodeconstants
456 self._nodeconstants = nodeconstants
456 self._ui = ui
457 self._ui = ui
457 self._opener = opener
458 self._opener = opener
458 self._root = root
459 self._root = root
459 self._filename = b'dirstate'
460 self._filename = b'dirstate'
460 self._nodelen = 20 # Also update Rust code when changing this!
461 self._nodelen = 20 # Also update Rust code when changing this!
461 self._parents = None
462 self._parents = None
462 self._dirtyparents = False
463 self._dirtyparents = False
463
464
464 # for consistent view between _pl() and _read() invocations
465 # for consistent view between _pl() and _read() invocations
465 self._pendingmode = None
466 self._pendingmode = None
466
467
467 self._use_dirstate_tree = self._ui.configbool(
468 self._use_dirstate_tree = self._ui.configbool(
468 b"experimental",
469 b"experimental",
469 b"dirstate-tree.in-memory",
470 b"dirstate-tree.in-memory",
470 False,
471 False,
471 )
472 )
472
473
473 def addfile(
474 def addfile(
474 self,
475 self,
475 f,
476 f,
476 state=None,
477 state=None,
477 mode=0,
478 mode=0,
478 size=None,
479 size=None,
479 mtime=None,
480 mtime=None,
480 added=False,
481 added=False,
481 merged=False,
482 merged=False,
482 from_p2=False,
483 from_p2=False,
483 possibly_dirty=False,
484 possibly_dirty=False,
484 ):
485 ):
485 return self._rustmap.addfile(
486 return self._rustmap.addfile(
486 f,
487 f,
487 state,
488 state,
488 mode,
489 mode,
489 size,
490 size,
490 mtime,
491 mtime,
491 added,
492 added,
492 merged,
493 merged,
493 from_p2,
494 from_p2,
494 possibly_dirty,
495 possibly_dirty,
495 )
496 )
496
497
497 def removefile(self, *args, **kwargs):
498 def removefile(self, *args, **kwargs):
498 return self._rustmap.removefile(*args, **kwargs)
499 return self._rustmap.removefile(*args, **kwargs)
499
500
500 def dropfile(self, *args, **kwargs):
501 def dropfile(self, *args, **kwargs):
501 return self._rustmap.dropfile(*args, **kwargs)
502 return self._rustmap.dropfile(*args, **kwargs)
502
503
503 def clearambiguoustimes(self, *args, **kwargs):
504 def clearambiguoustimes(self, *args, **kwargs):
504 return self._rustmap.clearambiguoustimes(*args, **kwargs)
505 return self._rustmap.clearambiguoustimes(*args, **kwargs)
505
506
506 def nonnormalentries(self):
507 def nonnormalentries(self):
507 return self._rustmap.nonnormalentries()
508 return self._rustmap.nonnormalentries()
508
509
509 def get(self, *args, **kwargs):
510 def get(self, *args, **kwargs):
510 return self._rustmap.get(*args, **kwargs)
511 return self._rustmap.get(*args, **kwargs)
511
512
512 @property
513 @property
513 def copymap(self):
514 def copymap(self):
514 return self._rustmap.copymap()
515 return self._rustmap.copymap()
515
516
516 def directories(self):
517 def directories(self):
517 return self._rustmap.directories()
518 return self._rustmap.directories()
518
519
519 def preload(self):
520 def preload(self):
520 self._rustmap
521 self._rustmap
521
522
522 def clear(self):
523 def clear(self):
523 self._rustmap.clear()
524 self._rustmap.clear()
524 self.setparents(
525 self.setparents(
525 self._nodeconstants.nullid, self._nodeconstants.nullid
526 self._nodeconstants.nullid, self._nodeconstants.nullid
526 )
527 )
527 util.clearcachedproperty(self, b"_dirs")
528 util.clearcachedproperty(self, b"_dirs")
528 util.clearcachedproperty(self, b"_alldirs")
529 util.clearcachedproperty(self, b"_alldirs")
529 util.clearcachedproperty(self, b"dirfoldmap")
530 util.clearcachedproperty(self, b"dirfoldmap")
530
531
531 def items(self):
532 def items(self):
532 return self._rustmap.items()
533 return self._rustmap.items()
533
534
534 def keys(self):
535 def keys(self):
535 return iter(self._rustmap)
536 return iter(self._rustmap)
536
537
537 def __contains__(self, key):
538 def __contains__(self, key):
538 return key in self._rustmap
539 return key in self._rustmap
539
540
540 def __getitem__(self, item):
541 def __getitem__(self, item):
541 return self._rustmap[item]
542 return self._rustmap[item]
542
543
543 def __len__(self):
544 def __len__(self):
544 return len(self._rustmap)
545 return len(self._rustmap)
545
546
546 def __iter__(self):
547 def __iter__(self):
547 return iter(self._rustmap)
548 return iter(self._rustmap)
548
549
549 # forward for python2,3 compat
550 # forward for python2,3 compat
550 iteritems = items
551 iteritems = items
551
552
552 def _opendirstatefile(self):
553 def _opendirstatefile(self):
553 fp, mode = txnutil.trypending(
554 fp, mode = txnutil.trypending(
554 self._root, self._opener, self._filename
555 self._root, self._opener, self._filename
555 )
556 )
556 if self._pendingmode is not None and self._pendingmode != mode:
557 if self._pendingmode is not None and self._pendingmode != mode:
557 fp.close()
558 fp.close()
558 raise error.Abort(
559 raise error.Abort(
559 _(b'working directory state may be changed parallelly')
560 _(b'working directory state may be changed parallelly')
560 )
561 )
561 self._pendingmode = mode
562 self._pendingmode = mode
562 return fp
563 return fp
563
564
564 def setparents(self, p1, p2):
565 def setparents(self, p1, p2):
565 self._parents = (p1, p2)
566 self._parents = (p1, p2)
566 self._dirtyparents = True
567 self._dirtyparents = True
567
568
568 def parents(self):
569 def parents(self):
569 if not self._parents:
570 if not self._parents:
570 if self._use_dirstate_v2:
571 if self._use_dirstate_v2:
571 offset = len(rustmod.V2_FORMAT_MARKER)
572 offset = len(rustmod.V2_FORMAT_MARKER)
572 else:
573 else:
573 offset = 0
574 offset = 0
574 read_len = offset + self._nodelen * 2
575 read_len = offset + self._nodelen * 2
575 try:
576 try:
576 fp = self._opendirstatefile()
577 fp = self._opendirstatefile()
577 st = fp.read(read_len)
578 st = fp.read(read_len)
578 fp.close()
579 fp.close()
579 except IOError as err:
580 except IOError as err:
580 if err.errno != errno.ENOENT:
581 if err.errno != errno.ENOENT:
581 raise
582 raise
582 # File doesn't exist, so the current state is empty
583 # File doesn't exist, so the current state is empty
583 st = b''
584 st = b''
584
585
585 l = len(st)
586 l = len(st)
586 if l == read_len:
587 if l == read_len:
587 st = st[offset:]
588 st = st[offset:]
588 self._parents = (
589 self._parents = (
589 st[: self._nodelen],
590 st[: self._nodelen],
590 st[self._nodelen : 2 * self._nodelen],
591 st[self._nodelen : 2 * self._nodelen],
591 )
592 )
592 elif l == 0:
593 elif l == 0:
593 self._parents = (
594 self._parents = (
594 self._nodeconstants.nullid,
595 self._nodeconstants.nullid,
595 self._nodeconstants.nullid,
596 self._nodeconstants.nullid,
596 )
597 )
597 else:
598 else:
598 raise error.Abort(
599 raise error.Abort(
599 _(b'working directory state appears damaged!')
600 _(b'working directory state appears damaged!')
600 )
601 )
601
602
602 return self._parents
603 return self._parents
603
604
604 @propertycache
605 @propertycache
605 def _rustmap(self):
606 def _rustmap(self):
606 """
607 """
607 Fills the Dirstatemap when called.
608 Fills the Dirstatemap when called.
608 """
609 """
609 # ignore HG_PENDING because identity is used only for writing
610 # ignore HG_PENDING because identity is used only for writing
610 self.identity = util.filestat.frompath(
611 self.identity = util.filestat.frompath(
611 self._opener.join(self._filename)
612 self._opener.join(self._filename)
612 )
613 )
613
614
614 try:
615 try:
615 fp = self._opendirstatefile()
616 fp = self._opendirstatefile()
616 try:
617 try:
617 st = fp.read()
618 st = fp.read()
618 finally:
619 finally:
619 fp.close()
620 fp.close()
620 except IOError as err:
621 except IOError as err:
621 if err.errno != errno.ENOENT:
622 if err.errno != errno.ENOENT:
622 raise
623 raise
623 st = b''
624 st = b''
624
625
625 self._rustmap, parents = rustmod.DirstateMap.new(
626 self._rustmap, parents = rustmod.DirstateMap.new(
626 self._use_dirstate_tree, self._use_dirstate_v2, st
627 self._use_dirstate_tree, self._use_dirstate_v2, st
627 )
628 )
628
629
629 if parents and not self._dirtyparents:
630 if parents and not self._dirtyparents:
630 self.setparents(*parents)
631 self.setparents(*parents)
631
632
632 self.__contains__ = self._rustmap.__contains__
633 self.__contains__ = self._rustmap.__contains__
633 self.__getitem__ = self._rustmap.__getitem__
634 self.__getitem__ = self._rustmap.__getitem__
634 self.get = self._rustmap.get
635 self.get = self._rustmap.get
635 return self._rustmap
636 return self._rustmap
636
637
637 def write(self, st, now):
638 def write(self, st, now):
638 parents = self.parents()
639 parents = self.parents()
639 packed = self._rustmap.write(
640 packed = self._rustmap.write(
640 self._use_dirstate_v2, parents[0], parents[1], now
641 self._use_dirstate_v2, parents[0], parents[1], now
641 )
642 )
642 st.write(packed)
643 st.write(packed)
643 st.close()
644 st.close()
644 self._dirtyparents = False
645 self._dirtyparents = False
645
646
646 @propertycache
647 @propertycache
647 def filefoldmap(self):
648 def filefoldmap(self):
648 """Returns a dictionary mapping normalized case paths to their
649 """Returns a dictionary mapping normalized case paths to their
649 non-normalized versions.
650 non-normalized versions.
650 """
651 """
651 return self._rustmap.filefoldmapasdict()
652 return self._rustmap.filefoldmapasdict()
652
653
653 def hastrackeddir(self, d):
654 def hastrackeddir(self, d):
654 return self._rustmap.hastrackeddir(d)
655 return self._rustmap.hastrackeddir(d)
655
656
656 def hasdir(self, d):
657 def hasdir(self, d):
657 return self._rustmap.hasdir(d)
658 return self._rustmap.hasdir(d)
658
659
659 @propertycache
660 @propertycache
660 def identity(self):
661 def identity(self):
661 self._rustmap
662 self._rustmap
662 return self.identity
663 return self.identity
663
664
664 @property
665 @property
665 def nonnormalset(self):
666 def nonnormalset(self):
666 nonnorm = self._rustmap.non_normal_entries()
667 nonnorm = self._rustmap.non_normal_entries()
667 return nonnorm
668 return nonnorm
668
669
669 @propertycache
670 @propertycache
670 def otherparentset(self):
671 def otherparentset(self):
671 otherparents = self._rustmap.other_parent_entries()
672 otherparents = self._rustmap.other_parent_entries()
672 return otherparents
673 return otherparents
673
674
674 def non_normal_or_other_parent_paths(self):
675 def non_normal_or_other_parent_paths(self):
675 return self._rustmap.non_normal_or_other_parent_paths()
676 return self._rustmap.non_normal_or_other_parent_paths()
676
677
677 @propertycache
678 @propertycache
678 def dirfoldmap(self):
679 def dirfoldmap(self):
679 f = {}
680 f = {}
680 normcase = util.normcase
681 normcase = util.normcase
681 for name, _pseudo_entry in self.directories():
682 for name, _pseudo_entry in self.directories():
682 f[normcase(name)] = name
683 f[normcase(name)] = name
683 return f
684 return f
@@ -1,475 +1,476 b''
1 // dirstate_map.rs
1 // dirstate_map.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::dirstate::parsers::Timestamp;
8 use crate::dirstate::parsers::Timestamp;
9 use crate::{
9 use crate::{
10 dirstate::EntryState,
10 dirstate::EntryState,
11 dirstate::MTIME_UNSET,
11 dirstate::MTIME_UNSET,
12 dirstate::SIZE_FROM_OTHER_PARENT,
12 dirstate::SIZE_FROM_OTHER_PARENT,
13 dirstate::SIZE_NON_NORMAL,
13 dirstate::SIZE_NON_NORMAL,
14 dirstate::V1_RANGEMASK,
14 dirstate::V1_RANGEMASK,
15 pack_dirstate, parse_dirstate,
15 pack_dirstate, parse_dirstate,
16 utils::hg_path::{HgPath, HgPathBuf},
16 utils::hg_path::{HgPath, HgPathBuf},
17 CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
17 CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
18 StateMap,
18 StateMap,
19 };
19 };
20 use micro_timer::timed;
20 use micro_timer::timed;
21 use std::collections::HashSet;
21 use std::collections::HashSet;
22 use std::iter::FromIterator;
22 use std::iter::FromIterator;
23 use std::ops::Deref;
23 use std::ops::Deref;
24
24
25 #[derive(Default)]
25 #[derive(Default)]
26 pub struct DirstateMap {
26 pub struct DirstateMap {
27 state_map: StateMap,
27 state_map: StateMap,
28 pub copy_map: CopyMap,
28 pub copy_map: CopyMap,
29 pub dirs: Option<DirsMultiset>,
29 pub dirs: Option<DirsMultiset>,
30 pub all_dirs: Option<DirsMultiset>,
30 pub all_dirs: Option<DirsMultiset>,
31 non_normal_set: Option<HashSet<HgPathBuf>>,
31 non_normal_set: Option<HashSet<HgPathBuf>>,
32 other_parent_set: Option<HashSet<HgPathBuf>>,
32 other_parent_set: Option<HashSet<HgPathBuf>>,
33 }
33 }
34
34
35 /// Should only really be used in python interface code, for clarity
35 /// Should only really be used in python interface code, for clarity
36 impl Deref for DirstateMap {
36 impl Deref for DirstateMap {
37 type Target = StateMap;
37 type Target = StateMap;
38
38
39 fn deref(&self) -> &Self::Target {
39 fn deref(&self) -> &Self::Target {
40 &self.state_map
40 &self.state_map
41 }
41 }
42 }
42 }
43
43
44 impl FromIterator<(HgPathBuf, DirstateEntry)> for DirstateMap {
44 impl FromIterator<(HgPathBuf, DirstateEntry)> for DirstateMap {
45 fn from_iter<I: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
45 fn from_iter<I: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
46 iter: I,
46 iter: I,
47 ) -> Self {
47 ) -> Self {
48 Self {
48 Self {
49 state_map: iter.into_iter().collect(),
49 state_map: iter.into_iter().collect(),
50 ..Self::default()
50 ..Self::default()
51 }
51 }
52 }
52 }
53 }
53 }
54
54
55 impl DirstateMap {
55 impl DirstateMap {
56 pub fn new() -> Self {
56 pub fn new() -> Self {
57 Self::default()
57 Self::default()
58 }
58 }
59
59
60 pub fn clear(&mut self) {
60 pub fn clear(&mut self) {
61 self.state_map = StateMap::default();
61 self.state_map = StateMap::default();
62 self.copy_map.clear();
62 self.copy_map.clear();
63 self.non_normal_set = None;
63 self.non_normal_set = None;
64 self.other_parent_set = None;
64 self.other_parent_set = None;
65 }
65 }
66
66
67 /// Add a tracked file to the dirstate
67 /// Add a tracked file to the dirstate
68 pub fn add_file(
68 pub fn add_file(
69 &mut self,
69 &mut self,
70 filename: &HgPath,
70 filename: &HgPath,
71 entry: DirstateEntry,
71 entry: DirstateEntry,
72 // XXX once the dust settle this should probably become an enum
72 // XXX once the dust settle this should probably become an enum
73 added: bool,
73 added: bool,
74 merged: bool,
74 merged: bool,
75 from_p2: bool,
75 from_p2: bool,
76 possibly_dirty: bool,
76 possibly_dirty: bool,
77 ) -> Result<(), DirstateError> {
77 ) -> Result<(), DirstateError> {
78 let mut entry = entry;
78 let mut entry = entry;
79 if added {
79 if added {
80 assert!(!merged);
80 assert!(!merged);
81 assert!(!possibly_dirty);
81 assert!(!possibly_dirty);
82 assert!(!from_p2);
82 assert!(!from_p2);
83 entry.state = EntryState::Added;
83 entry.state = EntryState::Added;
84 entry.size = SIZE_NON_NORMAL;
84 entry.size = SIZE_NON_NORMAL;
85 entry.mtime = MTIME_UNSET;
85 entry.mtime = MTIME_UNSET;
86 } else if merged {
86 } else if merged {
87 assert!(!possibly_dirty);
87 assert!(!possibly_dirty);
88 assert!(!from_p2);
88 assert!(!from_p2);
89 entry.state = EntryState::Merged;
89 entry.state = EntryState::Merged;
90 entry.size = SIZE_FROM_OTHER_PARENT;
90 entry.size = SIZE_FROM_OTHER_PARENT;
91 entry.mtime = MTIME_UNSET;
91 entry.mtime = MTIME_UNSET;
92 } else if from_p2 {
92 } else if from_p2 {
93 assert!(!possibly_dirty);
93 assert!(!possibly_dirty);
94 entry.size = SIZE_FROM_OTHER_PARENT;
94 entry.size = SIZE_FROM_OTHER_PARENT;
95 entry.mtime = MTIME_UNSET;
95 entry.mtime = MTIME_UNSET;
96 } else if possibly_dirty {
96 } else if possibly_dirty {
97 entry.state = EntryState::Normal;
97 entry.size = SIZE_NON_NORMAL;
98 entry.size = SIZE_NON_NORMAL;
98 entry.mtime = MTIME_UNSET;
99 entry.mtime = MTIME_UNSET;
99 } else {
100 } else {
100 entry.size = entry.size & V1_RANGEMASK;
101 entry.size = entry.size & V1_RANGEMASK;
101 entry.mtime = entry.mtime & V1_RANGEMASK;
102 entry.mtime = entry.mtime & V1_RANGEMASK;
102 }
103 }
103 let old_state = match self.get(filename) {
104 let old_state = match self.get(filename) {
104 Some(e) => e.state,
105 Some(e) => e.state,
105 None => EntryState::Unknown,
106 None => EntryState::Unknown,
106 };
107 };
107 if old_state == EntryState::Unknown || old_state == EntryState::Removed
108 if old_state == EntryState::Unknown || old_state == EntryState::Removed
108 {
109 {
109 if let Some(ref mut dirs) = self.dirs {
110 if let Some(ref mut dirs) = self.dirs {
110 dirs.add_path(filename)?;
111 dirs.add_path(filename)?;
111 }
112 }
112 }
113 }
113 if old_state == EntryState::Unknown {
114 if old_state == EntryState::Unknown {
114 if let Some(ref mut all_dirs) = self.all_dirs {
115 if let Some(ref mut all_dirs) = self.all_dirs {
115 all_dirs.add_path(filename)?;
116 all_dirs.add_path(filename)?;
116 }
117 }
117 }
118 }
118 self.state_map.insert(filename.to_owned(), entry.to_owned());
119 self.state_map.insert(filename.to_owned(), entry.to_owned());
119
120
120 if entry.is_non_normal() {
121 if entry.is_non_normal() {
121 self.get_non_normal_other_parent_entries()
122 self.get_non_normal_other_parent_entries()
122 .0
123 .0
123 .insert(filename.to_owned());
124 .insert(filename.to_owned());
124 }
125 }
125
126
126 if entry.is_from_other_parent() {
127 if entry.is_from_other_parent() {
127 self.get_non_normal_other_parent_entries()
128 self.get_non_normal_other_parent_entries()
128 .1
129 .1
129 .insert(filename.to_owned());
130 .insert(filename.to_owned());
130 }
131 }
131 Ok(())
132 Ok(())
132 }
133 }
133
134
134 /// Mark a file as removed in the dirstate.
135 /// Mark a file as removed in the dirstate.
135 ///
136 ///
136 /// The `size` parameter is used to store sentinel values that indicate
137 /// The `size` parameter is used to store sentinel values that indicate
137 /// the file's previous state. In the future, we should refactor this
138 /// the file's previous state. In the future, we should refactor this
138 /// to be more explicit about what that state is.
139 /// to be more explicit about what that state is.
139 pub fn remove_file(
140 pub fn remove_file(
140 &mut self,
141 &mut self,
141 filename: &HgPath,
142 filename: &HgPath,
142 in_merge: bool,
143 in_merge: bool,
143 ) -> Result<(), DirstateError> {
144 ) -> Result<(), DirstateError> {
144 let old_entry_opt = self.get(filename);
145 let old_entry_opt = self.get(filename);
145 let old_state = match old_entry_opt {
146 let old_state = match old_entry_opt {
146 Some(e) => e.state,
147 Some(e) => e.state,
147 None => EntryState::Unknown,
148 None => EntryState::Unknown,
148 };
149 };
149 let mut size = 0;
150 let mut size = 0;
150 if in_merge {
151 if in_merge {
151 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
152 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
152 // during a merge. So I (marmoute) am not sure we need the
153 // during a merge. So I (marmoute) am not sure we need the
153 // conditionnal at all. Adding double checking this with assert
154 // conditionnal at all. Adding double checking this with assert
154 // would be nice.
155 // would be nice.
155 if let Some(old_entry) = old_entry_opt {
156 if let Some(old_entry) = old_entry_opt {
156 // backup the previous state
157 // backup the previous state
157 if old_entry.state == EntryState::Merged {
158 if old_entry.state == EntryState::Merged {
158 size = SIZE_NON_NORMAL;
159 size = SIZE_NON_NORMAL;
159 } else if old_entry.state == EntryState::Normal
160 } else if old_entry.state == EntryState::Normal
160 && old_entry.size == SIZE_FROM_OTHER_PARENT
161 && old_entry.size == SIZE_FROM_OTHER_PARENT
161 {
162 {
162 // other parent
163 // other parent
163 size = SIZE_FROM_OTHER_PARENT;
164 size = SIZE_FROM_OTHER_PARENT;
164 self.get_non_normal_other_parent_entries()
165 self.get_non_normal_other_parent_entries()
165 .1
166 .1
166 .insert(filename.to_owned());
167 .insert(filename.to_owned());
167 }
168 }
168 }
169 }
169 }
170 }
170 if old_state != EntryState::Unknown && old_state != EntryState::Removed
171 if old_state != EntryState::Unknown && old_state != EntryState::Removed
171 {
172 {
172 if let Some(ref mut dirs) = self.dirs {
173 if let Some(ref mut dirs) = self.dirs {
173 dirs.delete_path(filename)?;
174 dirs.delete_path(filename)?;
174 }
175 }
175 }
176 }
176 if old_state == EntryState::Unknown {
177 if old_state == EntryState::Unknown {
177 if let Some(ref mut all_dirs) = self.all_dirs {
178 if let Some(ref mut all_dirs) = self.all_dirs {
178 all_dirs.add_path(filename)?;
179 all_dirs.add_path(filename)?;
179 }
180 }
180 }
181 }
181 if size == 0 {
182 if size == 0 {
182 self.copy_map.remove(filename);
183 self.copy_map.remove(filename);
183 }
184 }
184
185
185 self.state_map.insert(
186 self.state_map.insert(
186 filename.to_owned(),
187 filename.to_owned(),
187 DirstateEntry {
188 DirstateEntry {
188 state: EntryState::Removed,
189 state: EntryState::Removed,
189 mode: 0,
190 mode: 0,
190 size,
191 size,
191 mtime: 0,
192 mtime: 0,
192 },
193 },
193 );
194 );
194 self.get_non_normal_other_parent_entries()
195 self.get_non_normal_other_parent_entries()
195 .0
196 .0
196 .insert(filename.to_owned());
197 .insert(filename.to_owned());
197 Ok(())
198 Ok(())
198 }
199 }
199
200
200 /// Remove a file from the dirstate.
201 /// Remove a file from the dirstate.
201 /// Returns `true` if the file was previously recorded.
202 /// Returns `true` if the file was previously recorded.
202 pub fn drop_file(
203 pub fn drop_file(
203 &mut self,
204 &mut self,
204 filename: &HgPath,
205 filename: &HgPath,
205 old_state: EntryState,
206 old_state: EntryState,
206 ) -> Result<bool, DirstateError> {
207 ) -> Result<bool, DirstateError> {
207 let exists = self.state_map.remove(filename).is_some();
208 let exists = self.state_map.remove(filename).is_some();
208
209
209 if exists {
210 if exists {
210 if old_state != EntryState::Removed {
211 if old_state != EntryState::Removed {
211 if let Some(ref mut dirs) = self.dirs {
212 if let Some(ref mut dirs) = self.dirs {
212 dirs.delete_path(filename)?;
213 dirs.delete_path(filename)?;
213 }
214 }
214 }
215 }
215 if let Some(ref mut all_dirs) = self.all_dirs {
216 if let Some(ref mut all_dirs) = self.all_dirs {
216 all_dirs.delete_path(filename)?;
217 all_dirs.delete_path(filename)?;
217 }
218 }
218 }
219 }
219 self.get_non_normal_other_parent_entries()
220 self.get_non_normal_other_parent_entries()
220 .0
221 .0
221 .remove(filename);
222 .remove(filename);
222
223
223 Ok(exists)
224 Ok(exists)
224 }
225 }
225
226
226 pub fn clear_ambiguous_times(
227 pub fn clear_ambiguous_times(
227 &mut self,
228 &mut self,
228 filenames: Vec<HgPathBuf>,
229 filenames: Vec<HgPathBuf>,
229 now: i32,
230 now: i32,
230 ) {
231 ) {
231 for filename in filenames {
232 for filename in filenames {
232 if let Some(entry) = self.state_map.get_mut(&filename) {
233 if let Some(entry) = self.state_map.get_mut(&filename) {
233 if entry.clear_ambiguous_mtime(now) {
234 if entry.clear_ambiguous_mtime(now) {
234 self.get_non_normal_other_parent_entries()
235 self.get_non_normal_other_parent_entries()
235 .0
236 .0
236 .insert(filename.to_owned());
237 .insert(filename.to_owned());
237 }
238 }
238 }
239 }
239 }
240 }
240 }
241 }
241
242
242 pub fn non_normal_entries_remove(&mut self, key: impl AsRef<HgPath>) {
243 pub fn non_normal_entries_remove(&mut self, key: impl AsRef<HgPath>) {
243 self.get_non_normal_other_parent_entries()
244 self.get_non_normal_other_parent_entries()
244 .0
245 .0
245 .remove(key.as_ref());
246 .remove(key.as_ref());
246 }
247 }
247
248
248 pub fn non_normal_entries_union(
249 pub fn non_normal_entries_union(
249 &mut self,
250 &mut self,
250 other: HashSet<HgPathBuf>,
251 other: HashSet<HgPathBuf>,
251 ) -> Vec<HgPathBuf> {
252 ) -> Vec<HgPathBuf> {
252 self.get_non_normal_other_parent_entries()
253 self.get_non_normal_other_parent_entries()
253 .0
254 .0
254 .union(&other)
255 .union(&other)
255 .map(ToOwned::to_owned)
256 .map(ToOwned::to_owned)
256 .collect()
257 .collect()
257 }
258 }
258
259
259 pub fn get_non_normal_other_parent_entries(
260 pub fn get_non_normal_other_parent_entries(
260 &mut self,
261 &mut self,
261 ) -> (&mut HashSet<HgPathBuf>, &mut HashSet<HgPathBuf>) {
262 ) -> (&mut HashSet<HgPathBuf>, &mut HashSet<HgPathBuf>) {
262 self.set_non_normal_other_parent_entries(false);
263 self.set_non_normal_other_parent_entries(false);
263 (
264 (
264 self.non_normal_set.as_mut().unwrap(),
265 self.non_normal_set.as_mut().unwrap(),
265 self.other_parent_set.as_mut().unwrap(),
266 self.other_parent_set.as_mut().unwrap(),
266 )
267 )
267 }
268 }
268
269
269 /// Useful to get immutable references to those sets in contexts where
270 /// Useful to get immutable references to those sets in contexts where
270 /// you only have an immutable reference to the `DirstateMap`, like when
271 /// you only have an immutable reference to the `DirstateMap`, like when
271 /// sharing references with Python.
272 /// sharing references with Python.
272 ///
273 ///
273 /// TODO, get rid of this along with the other "setter/getter" stuff when
274 /// TODO, get rid of this along with the other "setter/getter" stuff when
274 /// a nice typestate plan is defined.
275 /// a nice typestate plan is defined.
275 ///
276 ///
276 /// # Panics
277 /// # Panics
277 ///
278 ///
278 /// Will panic if either set is `None`.
279 /// Will panic if either set is `None`.
279 pub fn get_non_normal_other_parent_entries_panic(
280 pub fn get_non_normal_other_parent_entries_panic(
280 &self,
281 &self,
281 ) -> (&HashSet<HgPathBuf>, &HashSet<HgPathBuf>) {
282 ) -> (&HashSet<HgPathBuf>, &HashSet<HgPathBuf>) {
282 (
283 (
283 self.non_normal_set.as_ref().unwrap(),
284 self.non_normal_set.as_ref().unwrap(),
284 self.other_parent_set.as_ref().unwrap(),
285 self.other_parent_set.as_ref().unwrap(),
285 )
286 )
286 }
287 }
287
288
288 pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
289 pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
289 if !force
290 if !force
290 && self.non_normal_set.is_some()
291 && self.non_normal_set.is_some()
291 && self.other_parent_set.is_some()
292 && self.other_parent_set.is_some()
292 {
293 {
293 return;
294 return;
294 }
295 }
295 let mut non_normal = HashSet::new();
296 let mut non_normal = HashSet::new();
296 let mut other_parent = HashSet::new();
297 let mut other_parent = HashSet::new();
297
298
298 for (filename, entry) in self.state_map.iter() {
299 for (filename, entry) in self.state_map.iter() {
299 if entry.is_non_normal() {
300 if entry.is_non_normal() {
300 non_normal.insert(filename.to_owned());
301 non_normal.insert(filename.to_owned());
301 }
302 }
302 if entry.is_from_other_parent() {
303 if entry.is_from_other_parent() {
303 other_parent.insert(filename.to_owned());
304 other_parent.insert(filename.to_owned());
304 }
305 }
305 }
306 }
306 self.non_normal_set = Some(non_normal);
307 self.non_normal_set = Some(non_normal);
307 self.other_parent_set = Some(other_parent);
308 self.other_parent_set = Some(other_parent);
308 }
309 }
309
310
310 /// Both of these setters and their uses appear to be the simplest way to
311 /// Both of these setters and their uses appear to be the simplest way to
311 /// emulate a Python lazy property, but it is ugly and unidiomatic.
312 /// emulate a Python lazy property, but it is ugly and unidiomatic.
312 /// TODO One day, rewriting this struct using the typestate might be a
313 /// TODO One day, rewriting this struct using the typestate might be a
313 /// good idea.
314 /// good idea.
314 pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
315 pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
315 if self.all_dirs.is_none() {
316 if self.all_dirs.is_none() {
316 self.all_dirs = Some(DirsMultiset::from_dirstate(
317 self.all_dirs = Some(DirsMultiset::from_dirstate(
317 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
318 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
318 None,
319 None,
319 )?);
320 )?);
320 }
321 }
321 Ok(())
322 Ok(())
322 }
323 }
323
324
324 pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
325 pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
325 if self.dirs.is_none() {
326 if self.dirs.is_none() {
326 self.dirs = Some(DirsMultiset::from_dirstate(
327 self.dirs = Some(DirsMultiset::from_dirstate(
327 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
328 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
328 Some(EntryState::Removed),
329 Some(EntryState::Removed),
329 )?);
330 )?);
330 }
331 }
331 Ok(())
332 Ok(())
332 }
333 }
333
334
334 pub fn has_tracked_dir(
335 pub fn has_tracked_dir(
335 &mut self,
336 &mut self,
336 directory: &HgPath,
337 directory: &HgPath,
337 ) -> Result<bool, DirstateError> {
338 ) -> Result<bool, DirstateError> {
338 self.set_dirs()?;
339 self.set_dirs()?;
339 Ok(self.dirs.as_ref().unwrap().contains(directory))
340 Ok(self.dirs.as_ref().unwrap().contains(directory))
340 }
341 }
341
342
342 pub fn has_dir(
343 pub fn has_dir(
343 &mut self,
344 &mut self,
344 directory: &HgPath,
345 directory: &HgPath,
345 ) -> Result<bool, DirstateError> {
346 ) -> Result<bool, DirstateError> {
346 self.set_all_dirs()?;
347 self.set_all_dirs()?;
347 Ok(self.all_dirs.as_ref().unwrap().contains(directory))
348 Ok(self.all_dirs.as_ref().unwrap().contains(directory))
348 }
349 }
349
350
350 #[timed]
351 #[timed]
351 pub fn read(
352 pub fn read(
352 &mut self,
353 &mut self,
353 file_contents: &[u8],
354 file_contents: &[u8],
354 ) -> Result<Option<DirstateParents>, DirstateError> {
355 ) -> Result<Option<DirstateParents>, DirstateError> {
355 if file_contents.is_empty() {
356 if file_contents.is_empty() {
356 return Ok(None);
357 return Ok(None);
357 }
358 }
358
359
359 let (parents, entries, copies) = parse_dirstate(file_contents)?;
360 let (parents, entries, copies) = parse_dirstate(file_contents)?;
360 self.state_map.extend(
361 self.state_map.extend(
361 entries
362 entries
362 .into_iter()
363 .into_iter()
363 .map(|(path, entry)| (path.to_owned(), entry)),
364 .map(|(path, entry)| (path.to_owned(), entry)),
364 );
365 );
365 self.copy_map.extend(
366 self.copy_map.extend(
366 copies
367 copies
367 .into_iter()
368 .into_iter()
368 .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
369 .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
369 );
370 );
370 Ok(Some(parents.clone()))
371 Ok(Some(parents.clone()))
371 }
372 }
372
373
373 pub fn pack(
374 pub fn pack(
374 &mut self,
375 &mut self,
375 parents: DirstateParents,
376 parents: DirstateParents,
376 now: Timestamp,
377 now: Timestamp,
377 ) -> Result<Vec<u8>, DirstateError> {
378 ) -> Result<Vec<u8>, DirstateError> {
378 let packed =
379 let packed =
379 pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
380 pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
380
381
381 self.set_non_normal_other_parent_entries(true);
382 self.set_non_normal_other_parent_entries(true);
382 Ok(packed)
383 Ok(packed)
383 }
384 }
384 }
385 }
385
386
#[cfg(test)]
mod tests {
    use super::*;

    // The `dirs` / `all_dirs` multisets are lazy: they must only be
    // built by the accessor that needs them.
    #[test]
    fn test_dirs_multiset() {
        let mut map = DirstateMap::new();
        assert!(map.dirs.is_none());
        assert!(map.all_dirs.is_none());

        assert_eq!(map.has_dir(HgPath::new(b"nope")).unwrap(), false);
        assert!(map.all_dirs.is_some());
        assert!(map.dirs.is_none());

        assert_eq!(map.has_tracked_dir(HgPath::new(b"nope")).unwrap(), false);
        assert!(map.dirs.is_some());
    }

    // Adding a plain normal entry must not mark it non-normal or
    // other-parent.
    #[test]
    fn test_add_file() {
        let mut map = DirstateMap::new();

        assert_eq!(0, map.len());

        map.add_file(
            HgPath::new(b"meh"),
            DirstateEntry {
                state: EntryState::Normal,
                mode: 1337,
                mtime: 1337,
                size: 1337,
            },
            false,
            false,
            false,
            false,
        )
        .unwrap();

        assert_eq!(1, map.len());
        assert_eq!(0, map.get_non_normal_other_parent_entries().0.len());
        assert_eq!(0, map.get_non_normal_other_parent_entries().1.len());
    }

    // Exercise the classification of entries into the non-normal and
    // other-parent sets across all states and sentinel sizes/mtimes.
    #[test]
    fn test_non_normal_other_parent_entries() {
        let mut map: DirstateMap = [
            (b"f1", (EntryState::Removed, 1337, 1337, 1337)),
            (b"f2", (EntryState::Normal, 1337, 1337, -1)),
            (b"f3", (EntryState::Normal, 1337, 1337, 1337)),
            (b"f4", (EntryState::Normal, 1337, -2, 1337)),
            (b"f5", (EntryState::Added, 1337, 1337, 1337)),
            (b"f6", (EntryState::Added, 1337, 1337, -1)),
            (b"f7", (EntryState::Merged, 1337, 1337, -1)),
            (b"f8", (EntryState::Merged, 1337, 1337, 1337)),
            (b"f9", (EntryState::Merged, 1337, -2, 1337)),
            (b"fa", (EntryState::Added, 1337, -2, 1337)),
            (b"fb", (EntryState::Removed, 1337, -2, 1337)),
        ]
        .iter()
        .map(|(fname, (state, mode, size, mtime))| {
            (
                HgPathBuf::from_bytes(fname.as_ref()),
                DirstateEntry {
                    state: *state,
                    mode: *mode,
                    size: *size,
                    mtime: *mtime,
                },
            )
        })
        .collect();

        let mut non_normal = [
            b"f1", b"f2", b"f5", b"f6", b"f7", b"f8", b"f9", b"fa", b"fb",
        ]
        .iter()
        .map(|x| HgPathBuf::from_bytes(x.as_ref()))
        .collect();

        let mut other_parent = HashSet::new();
        other_parent.insert(HgPathBuf::from_bytes(b"f4"));
        let entries = map.get_non_normal_other_parent_entries();

        assert_eq!(
            (&mut non_normal, &mut other_parent),
            (entries.0, entries.1)
        );
    }
}
@@ -1,1203 +1,1204 b''
1 use bytes_cast::BytesCast;
1 use bytes_cast::BytesCast;
2 use micro_timer::timed;
2 use micro_timer::timed;
3 use std::borrow::Cow;
3 use std::borrow::Cow;
4 use std::convert::TryInto;
4 use std::convert::TryInto;
5 use std::path::PathBuf;
5 use std::path::PathBuf;
6
6
7 use super::on_disk;
7 use super::on_disk;
8 use super::on_disk::DirstateV2ParseError;
8 use super::on_disk::DirstateV2ParseError;
9 use super::path_with_basename::WithBasename;
9 use super::path_with_basename::WithBasename;
10 use crate::dirstate::parsers::pack_entry;
10 use crate::dirstate::parsers::pack_entry;
11 use crate::dirstate::parsers::packed_entry_size;
11 use crate::dirstate::parsers::packed_entry_size;
12 use crate::dirstate::parsers::parse_dirstate_entries;
12 use crate::dirstate::parsers::parse_dirstate_entries;
13 use crate::dirstate::parsers::Timestamp;
13 use crate::dirstate::parsers::Timestamp;
14 use crate::dirstate::MTIME_UNSET;
14 use crate::dirstate::MTIME_UNSET;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
16 use crate::dirstate::SIZE_NON_NORMAL;
16 use crate::dirstate::SIZE_NON_NORMAL;
17 use crate::dirstate::V1_RANGEMASK;
17 use crate::dirstate::V1_RANGEMASK;
18 use crate::matchers::Matcher;
18 use crate::matchers::Matcher;
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
20 use crate::CopyMapIter;
20 use crate::CopyMapIter;
21 use crate::DirstateEntry;
21 use crate::DirstateEntry;
22 use crate::DirstateError;
22 use crate::DirstateError;
23 use crate::DirstateParents;
23 use crate::DirstateParents;
24 use crate::DirstateStatus;
24 use crate::DirstateStatus;
25 use crate::EntryState;
25 use crate::EntryState;
26 use crate::FastHashMap;
26 use crate::FastHashMap;
27 use crate::PatternFileWarning;
27 use crate::PatternFileWarning;
28 use crate::StateMapIter;
28 use crate::StateMapIter;
29 use crate::StatusError;
29 use crate::StatusError;
30 use crate::StatusOptions;
30 use crate::StatusOptions;
31
31
32 pub struct DirstateMap<'on_disk> {
32 pub struct DirstateMap<'on_disk> {
33 /// Contents of the `.hg/dirstate` file
33 /// Contents of the `.hg/dirstate` file
34 pub(super) on_disk: &'on_disk [u8],
34 pub(super) on_disk: &'on_disk [u8],
35
35
36 pub(super) root: ChildNodes<'on_disk>,
36 pub(super) root: ChildNodes<'on_disk>,
37
37
38 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
38 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
39 pub(super) nodes_with_entry_count: u32,
39 pub(super) nodes_with_entry_count: u32,
40
40
41 /// Number of nodes anywhere in the tree that have
41 /// Number of nodes anywhere in the tree that have
42 /// `.copy_source.is_some()`.
42 /// `.copy_source.is_some()`.
43 pub(super) nodes_with_copy_source_count: u32,
43 pub(super) nodes_with_copy_source_count: u32,
44
44
45 /// See on_disk::Header
45 /// See on_disk::Header
46 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
46 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
47 }
47 }
48
48
49 /// Using a plain `HgPathBuf` of the full path from the repository root as a
49 /// Using a plain `HgPathBuf` of the full path from the repository root as a
50 /// map key would also work: all paths in a given map have the same parent
50 /// map key would also work: all paths in a given map have the same parent
51 /// path, so comparing full paths gives the same result as comparing base
51 /// path, so comparing full paths gives the same result as comparing base
52 /// names. However `HashMap` would waste time always re-hashing the same
52 /// names. However `HashMap` would waste time always re-hashing the same
53 /// string prefix.
53 /// string prefix.
54 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
54 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
55
55
56 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
56 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
57 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
57 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
58 pub(super) enum BorrowedPath<'tree, 'on_disk> {
58 pub(super) enum BorrowedPath<'tree, 'on_disk> {
59 InMemory(&'tree HgPathBuf),
59 InMemory(&'tree HgPathBuf),
60 OnDisk(&'on_disk HgPath),
60 OnDisk(&'on_disk HgPath),
61 }
61 }
62
62
63 pub(super) enum ChildNodes<'on_disk> {
63 pub(super) enum ChildNodes<'on_disk> {
64 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
64 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
65 OnDisk(&'on_disk [on_disk::Node]),
65 OnDisk(&'on_disk [on_disk::Node]),
66 }
66 }
67
67
68 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
68 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
69 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
69 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
70 OnDisk(&'on_disk [on_disk::Node]),
70 OnDisk(&'on_disk [on_disk::Node]),
71 }
71 }
72
72
73 pub(super) enum NodeRef<'tree, 'on_disk> {
73 pub(super) enum NodeRef<'tree, 'on_disk> {
74 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
74 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
75 OnDisk(&'on_disk on_disk::Node),
75 OnDisk(&'on_disk on_disk::Node),
76 }
76 }
77
77
78 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
78 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
79 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
79 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
80 match *self {
80 match *self {
81 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
81 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
82 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
82 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
83 }
83 }
84 }
84 }
85 }
85 }
86
86
87 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
87 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
88 type Target = HgPath;
88 type Target = HgPath;
89
89
90 fn deref(&self) -> &HgPath {
90 fn deref(&self) -> &HgPath {
91 match *self {
91 match *self {
92 BorrowedPath::InMemory(in_memory) => in_memory,
92 BorrowedPath::InMemory(in_memory) => in_memory,
93 BorrowedPath::OnDisk(on_disk) => on_disk,
93 BorrowedPath::OnDisk(on_disk) => on_disk,
94 }
94 }
95 }
95 }
96 }
96 }
97
97
98 impl Default for ChildNodes<'_> {
98 impl Default for ChildNodes<'_> {
99 fn default() -> Self {
99 fn default() -> Self {
100 ChildNodes::InMemory(Default::default())
100 ChildNodes::InMemory(Default::default())
101 }
101 }
102 }
102 }
103
103
104 impl<'on_disk> ChildNodes<'on_disk> {
104 impl<'on_disk> ChildNodes<'on_disk> {
105 pub(super) fn as_ref<'tree>(
105 pub(super) fn as_ref<'tree>(
106 &'tree self,
106 &'tree self,
107 ) -> ChildNodesRef<'tree, 'on_disk> {
107 ) -> ChildNodesRef<'tree, 'on_disk> {
108 match self {
108 match self {
109 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
109 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
110 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
110 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
111 }
111 }
112 }
112 }
113
113
114 pub(super) fn is_empty(&self) -> bool {
114 pub(super) fn is_empty(&self) -> bool {
115 match self {
115 match self {
116 ChildNodes::InMemory(nodes) => nodes.is_empty(),
116 ChildNodes::InMemory(nodes) => nodes.is_empty(),
117 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
117 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
118 }
118 }
119 }
119 }
120
120
121 pub(super) fn make_mut(
121 pub(super) fn make_mut(
122 &mut self,
122 &mut self,
123 on_disk: &'on_disk [u8],
123 on_disk: &'on_disk [u8],
124 ) -> Result<
124 ) -> Result<
125 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
125 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
126 DirstateV2ParseError,
126 DirstateV2ParseError,
127 > {
127 > {
128 match self {
128 match self {
129 ChildNodes::InMemory(nodes) => Ok(nodes),
129 ChildNodes::InMemory(nodes) => Ok(nodes),
130 ChildNodes::OnDisk(nodes) => {
130 ChildNodes::OnDisk(nodes) => {
131 let nodes = nodes
131 let nodes = nodes
132 .iter()
132 .iter()
133 .map(|node| {
133 .map(|node| {
134 Ok((
134 Ok((
135 node.path(on_disk)?,
135 node.path(on_disk)?,
136 node.to_in_memory_node(on_disk)?,
136 node.to_in_memory_node(on_disk)?,
137 ))
137 ))
138 })
138 })
139 .collect::<Result<_, _>>()?;
139 .collect::<Result<_, _>>()?;
140 *self = ChildNodes::InMemory(nodes);
140 *self = ChildNodes::InMemory(nodes);
141 match self {
141 match self {
142 ChildNodes::InMemory(nodes) => Ok(nodes),
142 ChildNodes::InMemory(nodes) => Ok(nodes),
143 ChildNodes::OnDisk(_) => unreachable!(),
143 ChildNodes::OnDisk(_) => unreachable!(),
144 }
144 }
145 }
145 }
146 }
146 }
147 }
147 }
148 }
148 }
149
149
150 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
150 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
151 pub(super) fn get(
151 pub(super) fn get(
152 &self,
152 &self,
153 base_name: &HgPath,
153 base_name: &HgPath,
154 on_disk: &'on_disk [u8],
154 on_disk: &'on_disk [u8],
155 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
155 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
156 match self {
156 match self {
157 ChildNodesRef::InMemory(nodes) => Ok(nodes
157 ChildNodesRef::InMemory(nodes) => Ok(nodes
158 .get_key_value(base_name)
158 .get_key_value(base_name)
159 .map(|(k, v)| NodeRef::InMemory(k, v))),
159 .map(|(k, v)| NodeRef::InMemory(k, v))),
160 ChildNodesRef::OnDisk(nodes) => {
160 ChildNodesRef::OnDisk(nodes) => {
161 let mut parse_result = Ok(());
161 let mut parse_result = Ok(());
162 let search_result = nodes.binary_search_by(|node| {
162 let search_result = nodes.binary_search_by(|node| {
163 match node.base_name(on_disk) {
163 match node.base_name(on_disk) {
164 Ok(node_base_name) => node_base_name.cmp(base_name),
164 Ok(node_base_name) => node_base_name.cmp(base_name),
165 Err(e) => {
165 Err(e) => {
166 parse_result = Err(e);
166 parse_result = Err(e);
167 // Dummy comparison result, `search_result` won’t
167 // Dummy comparison result, `search_result` won’t
168 // be used since `parse_result` is an error
168 // be used since `parse_result` is an error
169 std::cmp::Ordering::Equal
169 std::cmp::Ordering::Equal
170 }
170 }
171 }
171 }
172 });
172 });
173 parse_result.map(|()| {
173 parse_result.map(|()| {
174 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
174 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
175 })
175 })
176 }
176 }
177 }
177 }
178 }
178 }
179
179
180 /// Iterate in undefined order
180 /// Iterate in undefined order
181 pub(super) fn iter(
181 pub(super) fn iter(
182 &self,
182 &self,
183 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
183 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
184 match self {
184 match self {
185 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
185 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
186 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
186 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
187 ),
187 ),
188 ChildNodesRef::OnDisk(nodes) => {
188 ChildNodesRef::OnDisk(nodes) => {
189 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
189 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
190 }
190 }
191 }
191 }
192 }
192 }
193
193
194 /// Iterate in parallel in undefined order
194 /// Iterate in parallel in undefined order
195 pub(super) fn par_iter(
195 pub(super) fn par_iter(
196 &self,
196 &self,
197 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
197 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
198 {
198 {
199 use rayon::prelude::*;
199 use rayon::prelude::*;
200 match self {
200 match self {
201 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
201 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
202 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
202 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
203 ),
203 ),
204 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
204 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
205 nodes.par_iter().map(NodeRef::OnDisk),
205 nodes.par_iter().map(NodeRef::OnDisk),
206 ),
206 ),
207 }
207 }
208 }
208 }
209
209
210 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
210 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
211 match self {
211 match self {
212 ChildNodesRef::InMemory(nodes) => {
212 ChildNodesRef::InMemory(nodes) => {
213 let mut vec: Vec<_> = nodes
213 let mut vec: Vec<_> = nodes
214 .iter()
214 .iter()
215 .map(|(k, v)| NodeRef::InMemory(k, v))
215 .map(|(k, v)| NodeRef::InMemory(k, v))
216 .collect();
216 .collect();
217 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
217 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
218 match node {
218 match node {
219 NodeRef::InMemory(path, _node) => path.base_name(),
219 NodeRef::InMemory(path, _node) => path.base_name(),
220 NodeRef::OnDisk(_) => unreachable!(),
220 NodeRef::OnDisk(_) => unreachable!(),
221 }
221 }
222 }
222 }
223 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
223 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
224 // value: https://github.com/rust-lang/rust/issues/34162
224 // value: https://github.com/rust-lang/rust/issues/34162
225 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
225 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
226 vec
226 vec
227 }
227 }
228 ChildNodesRef::OnDisk(nodes) => {
228 ChildNodesRef::OnDisk(nodes) => {
229 // Nodes on disk are already sorted
229 // Nodes on disk are already sorted
230 nodes.iter().map(NodeRef::OnDisk).collect()
230 nodes.iter().map(NodeRef::OnDisk).collect()
231 }
231 }
232 }
232 }
233 }
233 }
234 }
234 }
235
235
236 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
236 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
237 pub(super) fn full_path(
237 pub(super) fn full_path(
238 &self,
238 &self,
239 on_disk: &'on_disk [u8],
239 on_disk: &'on_disk [u8],
240 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
240 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
241 match self {
241 match self {
242 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
242 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
243 NodeRef::OnDisk(node) => node.full_path(on_disk),
243 NodeRef::OnDisk(node) => node.full_path(on_disk),
244 }
244 }
245 }
245 }
246
246
247 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
247 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
248 /// HgPath>` detached from `'tree`
248 /// HgPath>` detached from `'tree`
249 pub(super) fn full_path_borrowed(
249 pub(super) fn full_path_borrowed(
250 &self,
250 &self,
251 on_disk: &'on_disk [u8],
251 on_disk: &'on_disk [u8],
252 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
252 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
253 match self {
253 match self {
254 NodeRef::InMemory(path, _node) => match path.full_path() {
254 NodeRef::InMemory(path, _node) => match path.full_path() {
255 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
255 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
256 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
256 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
257 },
257 },
258 NodeRef::OnDisk(node) => {
258 NodeRef::OnDisk(node) => {
259 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
259 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
260 }
260 }
261 }
261 }
262 }
262 }
263
263
264 pub(super) fn base_name(
264 pub(super) fn base_name(
265 &self,
265 &self,
266 on_disk: &'on_disk [u8],
266 on_disk: &'on_disk [u8],
267 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
267 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
268 match self {
268 match self {
269 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
269 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
270 NodeRef::OnDisk(node) => node.base_name(on_disk),
270 NodeRef::OnDisk(node) => node.base_name(on_disk),
271 }
271 }
272 }
272 }
273
273
274 pub(super) fn children(
274 pub(super) fn children(
275 &self,
275 &self,
276 on_disk: &'on_disk [u8],
276 on_disk: &'on_disk [u8],
277 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
277 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
278 match self {
278 match self {
279 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
279 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
280 NodeRef::OnDisk(node) => {
280 NodeRef::OnDisk(node) => {
281 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
281 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
282 }
282 }
283 }
283 }
284 }
284 }
285
285
286 pub(super) fn has_copy_source(&self) -> bool {
286 pub(super) fn has_copy_source(&self) -> bool {
287 match self {
287 match self {
288 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
288 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
289 NodeRef::OnDisk(node) => node.has_copy_source(),
289 NodeRef::OnDisk(node) => node.has_copy_source(),
290 }
290 }
291 }
291 }
292
292
293 pub(super) fn copy_source(
293 pub(super) fn copy_source(
294 &self,
294 &self,
295 on_disk: &'on_disk [u8],
295 on_disk: &'on_disk [u8],
296 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
296 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
297 match self {
297 match self {
298 NodeRef::InMemory(_path, node) => {
298 NodeRef::InMemory(_path, node) => {
299 Ok(node.copy_source.as_ref().map(|s| &**s))
299 Ok(node.copy_source.as_ref().map(|s| &**s))
300 }
300 }
301 NodeRef::OnDisk(node) => node.copy_source(on_disk),
301 NodeRef::OnDisk(node) => node.copy_source(on_disk),
302 }
302 }
303 }
303 }
304
304
305 pub(super) fn entry(
305 pub(super) fn entry(
306 &self,
306 &self,
307 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
307 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
308 match self {
308 match self {
309 NodeRef::InMemory(_path, node) => {
309 NodeRef::InMemory(_path, node) => {
310 Ok(node.data.as_entry().copied())
310 Ok(node.data.as_entry().copied())
311 }
311 }
312 NodeRef::OnDisk(node) => node.entry(),
312 NodeRef::OnDisk(node) => node.entry(),
313 }
313 }
314 }
314 }
315
315
316 pub(super) fn state(
316 pub(super) fn state(
317 &self,
317 &self,
318 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
318 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
319 match self {
319 match self {
320 NodeRef::InMemory(_path, node) => {
320 NodeRef::InMemory(_path, node) => {
321 Ok(node.data.as_entry().map(|entry| entry.state))
321 Ok(node.data.as_entry().map(|entry| entry.state))
322 }
322 }
323 NodeRef::OnDisk(node) => node.state(),
323 NodeRef::OnDisk(node) => node.state(),
324 }
324 }
325 }
325 }
326
326
327 pub(super) fn cached_directory_mtime(
327 pub(super) fn cached_directory_mtime(
328 &self,
328 &self,
329 ) -> Option<&'tree on_disk::Timestamp> {
329 ) -> Option<&'tree on_disk::Timestamp> {
330 match self {
330 match self {
331 NodeRef::InMemory(_path, node) => match &node.data {
331 NodeRef::InMemory(_path, node) => match &node.data {
332 NodeData::CachedDirectory { mtime } => Some(mtime),
332 NodeData::CachedDirectory { mtime } => Some(mtime),
333 _ => None,
333 _ => None,
334 },
334 },
335 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
335 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
336 }
336 }
337 }
337 }
338
338
339 pub(super) fn descendants_with_entry_count(&self) -> u32 {
339 pub(super) fn descendants_with_entry_count(&self) -> u32 {
340 match self {
340 match self {
341 NodeRef::InMemory(_path, node) => {
341 NodeRef::InMemory(_path, node) => {
342 node.descendants_with_entry_count
342 node.descendants_with_entry_count
343 }
343 }
344 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
344 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
345 }
345 }
346 }
346 }
347
347
348 pub(super) fn tracked_descendants_count(&self) -> u32 {
348 pub(super) fn tracked_descendants_count(&self) -> u32 {
349 match self {
349 match self {
350 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
350 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
351 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
351 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
352 }
352 }
353 }
353 }
354 }
354 }
355
355
356 /// Represents a file or a directory
356 /// Represents a file or a directory
357 #[derive(Default)]
357 #[derive(Default)]
358 pub(super) struct Node<'on_disk> {
358 pub(super) struct Node<'on_disk> {
359 pub(super) data: NodeData,
359 pub(super) data: NodeData,
360
360
361 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
361 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
362
362
363 pub(super) children: ChildNodes<'on_disk>,
363 pub(super) children: ChildNodes<'on_disk>,
364
364
365 /// How many (non-inclusive) descendants of this node have an entry.
365 /// How many (non-inclusive) descendants of this node have an entry.
366 pub(super) descendants_with_entry_count: u32,
366 pub(super) descendants_with_entry_count: u32,
367
367
368 /// How many (non-inclusive) descendants of this node have an entry whose
368 /// How many (non-inclusive) descendants of this node have an entry whose
369 /// state is "tracked".
369 /// state is "tracked".
370 pub(super) tracked_descendants_count: u32,
370 pub(super) tracked_descendants_count: u32,
371 }
371 }
372
372
373 pub(super) enum NodeData {
373 pub(super) enum NodeData {
374 Entry(DirstateEntry),
374 Entry(DirstateEntry),
375 CachedDirectory { mtime: on_disk::Timestamp },
375 CachedDirectory { mtime: on_disk::Timestamp },
376 None,
376 None,
377 }
377 }
378
378
379 impl Default for NodeData {
379 impl Default for NodeData {
380 fn default() -> Self {
380 fn default() -> Self {
381 NodeData::None
381 NodeData::None
382 }
382 }
383 }
383 }
384
384
385 impl NodeData {
385 impl NodeData {
386 fn has_entry(&self) -> bool {
386 fn has_entry(&self) -> bool {
387 match self {
387 match self {
388 NodeData::Entry(_) => true,
388 NodeData::Entry(_) => true,
389 _ => false,
389 _ => false,
390 }
390 }
391 }
391 }
392
392
393 fn as_entry(&self) -> Option<&DirstateEntry> {
393 fn as_entry(&self) -> Option<&DirstateEntry> {
394 match self {
394 match self {
395 NodeData::Entry(entry) => Some(entry),
395 NodeData::Entry(entry) => Some(entry),
396 _ => None,
396 _ => None,
397 }
397 }
398 }
398 }
399 }
399 }
400
400
401 impl<'on_disk> DirstateMap<'on_disk> {
401 impl<'on_disk> DirstateMap<'on_disk> {
402 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
402 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
403 Self {
403 Self {
404 on_disk,
404 on_disk,
405 root: ChildNodes::default(),
405 root: ChildNodes::default(),
406 nodes_with_entry_count: 0,
406 nodes_with_entry_count: 0,
407 nodes_with_copy_source_count: 0,
407 nodes_with_copy_source_count: 0,
408 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
408 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
409 }
409 }
410 }
410 }
411
411
412 #[timed]
412 #[timed]
413 pub fn new_v2(
413 pub fn new_v2(
414 on_disk: &'on_disk [u8],
414 on_disk: &'on_disk [u8],
415 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
415 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
416 Ok(on_disk::read(on_disk)?)
416 Ok(on_disk::read(on_disk)?)
417 }
417 }
418
418
419 #[timed]
419 #[timed]
420 pub fn new_v1(
420 pub fn new_v1(
421 on_disk: &'on_disk [u8],
421 on_disk: &'on_disk [u8],
422 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
422 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
423 let mut map = Self::empty(on_disk);
423 let mut map = Self::empty(on_disk);
424 if map.on_disk.is_empty() {
424 if map.on_disk.is_empty() {
425 return Ok((map, None));
425 return Ok((map, None));
426 }
426 }
427
427
428 let parents = parse_dirstate_entries(
428 let parents = parse_dirstate_entries(
429 map.on_disk,
429 map.on_disk,
430 |path, entry, copy_source| {
430 |path, entry, copy_source| {
431 let tracked = entry.state.is_tracked();
431 let tracked = entry.state.is_tracked();
432 let node = Self::get_or_insert_node(
432 let node = Self::get_or_insert_node(
433 map.on_disk,
433 map.on_disk,
434 &mut map.root,
434 &mut map.root,
435 path,
435 path,
436 WithBasename::to_cow_borrowed,
436 WithBasename::to_cow_borrowed,
437 |ancestor| {
437 |ancestor| {
438 if tracked {
438 if tracked {
439 ancestor.tracked_descendants_count += 1
439 ancestor.tracked_descendants_count += 1
440 }
440 }
441 ancestor.descendants_with_entry_count += 1
441 ancestor.descendants_with_entry_count += 1
442 },
442 },
443 )?;
443 )?;
444 assert!(
444 assert!(
445 !node.data.has_entry(),
445 !node.data.has_entry(),
446 "duplicate dirstate entry in read"
446 "duplicate dirstate entry in read"
447 );
447 );
448 assert!(
448 assert!(
449 node.copy_source.is_none(),
449 node.copy_source.is_none(),
450 "duplicate dirstate entry in read"
450 "duplicate dirstate entry in read"
451 );
451 );
452 node.data = NodeData::Entry(*entry);
452 node.data = NodeData::Entry(*entry);
453 node.copy_source = copy_source.map(Cow::Borrowed);
453 node.copy_source = copy_source.map(Cow::Borrowed);
454 map.nodes_with_entry_count += 1;
454 map.nodes_with_entry_count += 1;
455 if copy_source.is_some() {
455 if copy_source.is_some() {
456 map.nodes_with_copy_source_count += 1
456 map.nodes_with_copy_source_count += 1
457 }
457 }
458 Ok(())
458 Ok(())
459 },
459 },
460 )?;
460 )?;
461 let parents = Some(parents.clone());
461 let parents = Some(parents.clone());
462
462
463 Ok((map, parents))
463 Ok((map, parents))
464 }
464 }
465
465
466 fn get_node<'tree>(
466 fn get_node<'tree>(
467 &'tree self,
467 &'tree self,
468 path: &HgPath,
468 path: &HgPath,
469 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
469 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
470 let mut children = self.root.as_ref();
470 let mut children = self.root.as_ref();
471 let mut components = path.components();
471 let mut components = path.components();
472 let mut component =
472 let mut component =
473 components.next().expect("expected at least one components");
473 components.next().expect("expected at least one components");
474 loop {
474 loop {
475 if let Some(child) = children.get(component, self.on_disk)? {
475 if let Some(child) = children.get(component, self.on_disk)? {
476 if let Some(next_component) = components.next() {
476 if let Some(next_component) = components.next() {
477 component = next_component;
477 component = next_component;
478 children = child.children(self.on_disk)?;
478 children = child.children(self.on_disk)?;
479 } else {
479 } else {
480 return Ok(Some(child));
480 return Ok(Some(child));
481 }
481 }
482 } else {
482 } else {
483 return Ok(None);
483 return Ok(None);
484 }
484 }
485 }
485 }
486 }
486 }
487
487
488 /// Returns a mutable reference to the node at `path` if it exists
488 /// Returns a mutable reference to the node at `path` if it exists
489 ///
489 ///
490 /// This takes `root` instead of `&mut self` so that callers can mutate
490 /// This takes `root` instead of `&mut self` so that callers can mutate
491 /// other fields while the returned borrow is still valid
491 /// other fields while the returned borrow is still valid
492 fn get_node_mut<'tree>(
492 fn get_node_mut<'tree>(
493 on_disk: &'on_disk [u8],
493 on_disk: &'on_disk [u8],
494 root: &'tree mut ChildNodes<'on_disk>,
494 root: &'tree mut ChildNodes<'on_disk>,
495 path: &HgPath,
495 path: &HgPath,
496 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
496 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
497 let mut children = root;
497 let mut children = root;
498 let mut components = path.components();
498 let mut components = path.components();
499 let mut component =
499 let mut component =
500 components.next().expect("expected at least one components");
500 components.next().expect("expected at least one components");
501 loop {
501 loop {
502 if let Some(child) = children.make_mut(on_disk)?.get_mut(component)
502 if let Some(child) = children.make_mut(on_disk)?.get_mut(component)
503 {
503 {
504 if let Some(next_component) = components.next() {
504 if let Some(next_component) = components.next() {
505 component = next_component;
505 component = next_component;
506 children = &mut child.children;
506 children = &mut child.children;
507 } else {
507 } else {
508 return Ok(Some(child));
508 return Ok(Some(child));
509 }
509 }
510 } else {
510 } else {
511 return Ok(None);
511 return Ok(None);
512 }
512 }
513 }
513 }
514 }
514 }
515
515
516 pub(super) fn get_or_insert<'tree, 'path>(
516 pub(super) fn get_or_insert<'tree, 'path>(
517 &'tree mut self,
517 &'tree mut self,
518 path: &HgPath,
518 path: &HgPath,
519 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
519 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
520 Self::get_or_insert_node(
520 Self::get_or_insert_node(
521 self.on_disk,
521 self.on_disk,
522 &mut self.root,
522 &mut self.root,
523 path,
523 path,
524 WithBasename::to_cow_owned,
524 WithBasename::to_cow_owned,
525 |_| {},
525 |_| {},
526 )
526 )
527 }
527 }
528
528
529 pub(super) fn get_or_insert_node<'tree, 'path>(
529 pub(super) fn get_or_insert_node<'tree, 'path>(
530 on_disk: &'on_disk [u8],
530 on_disk: &'on_disk [u8],
531 root: &'tree mut ChildNodes<'on_disk>,
531 root: &'tree mut ChildNodes<'on_disk>,
532 path: &'path HgPath,
532 path: &'path HgPath,
533 to_cow: impl Fn(
533 to_cow: impl Fn(
534 WithBasename<&'path HgPath>,
534 WithBasename<&'path HgPath>,
535 ) -> WithBasename<Cow<'on_disk, HgPath>>,
535 ) -> WithBasename<Cow<'on_disk, HgPath>>,
536 mut each_ancestor: impl FnMut(&mut Node),
536 mut each_ancestor: impl FnMut(&mut Node),
537 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
537 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
538 let mut child_nodes = root;
538 let mut child_nodes = root;
539 let mut inclusive_ancestor_paths =
539 let mut inclusive_ancestor_paths =
540 WithBasename::inclusive_ancestors_of(path);
540 WithBasename::inclusive_ancestors_of(path);
541 let mut ancestor_path = inclusive_ancestor_paths
541 let mut ancestor_path = inclusive_ancestor_paths
542 .next()
542 .next()
543 .expect("expected at least one inclusive ancestor");
543 .expect("expected at least one inclusive ancestor");
544 loop {
544 loop {
545 // TODO: can we avoid allocating an owned key in cases where the
545 // TODO: can we avoid allocating an owned key in cases where the
546 // map already contains that key, without introducing double
546 // map already contains that key, without introducing double
547 // lookup?
547 // lookup?
548 let child_node = child_nodes
548 let child_node = child_nodes
549 .make_mut(on_disk)?
549 .make_mut(on_disk)?
550 .entry(to_cow(ancestor_path))
550 .entry(to_cow(ancestor_path))
551 .or_default();
551 .or_default();
552 if let Some(next) = inclusive_ancestor_paths.next() {
552 if let Some(next) = inclusive_ancestor_paths.next() {
553 each_ancestor(child_node);
553 each_ancestor(child_node);
554 ancestor_path = next;
554 ancestor_path = next;
555 child_nodes = &mut child_node.children;
555 child_nodes = &mut child_node.children;
556 } else {
556 } else {
557 return Ok(child_node);
557 return Ok(child_node);
558 }
558 }
559 }
559 }
560 }
560 }
561
561
562 fn add_or_remove_file(
562 fn add_or_remove_file(
563 &mut self,
563 &mut self,
564 path: &HgPath,
564 path: &HgPath,
565 old_state: EntryState,
565 old_state: EntryState,
566 new_entry: DirstateEntry,
566 new_entry: DirstateEntry,
567 ) -> Result<(), DirstateV2ParseError> {
567 ) -> Result<(), DirstateV2ParseError> {
568 let had_entry = old_state != EntryState::Unknown;
568 let had_entry = old_state != EntryState::Unknown;
569 let tracked_count_increment =
569 let tracked_count_increment =
570 match (old_state.is_tracked(), new_entry.state.is_tracked()) {
570 match (old_state.is_tracked(), new_entry.state.is_tracked()) {
571 (false, true) => 1,
571 (false, true) => 1,
572 (true, false) => -1,
572 (true, false) => -1,
573 _ => 0,
573 _ => 0,
574 };
574 };
575
575
576 let node = Self::get_or_insert_node(
576 let node = Self::get_or_insert_node(
577 self.on_disk,
577 self.on_disk,
578 &mut self.root,
578 &mut self.root,
579 path,
579 path,
580 WithBasename::to_cow_owned,
580 WithBasename::to_cow_owned,
581 |ancestor| {
581 |ancestor| {
582 if !had_entry {
582 if !had_entry {
583 ancestor.descendants_with_entry_count += 1;
583 ancestor.descendants_with_entry_count += 1;
584 }
584 }
585
585
586 // We can’t use `+= increment` because the counter is unsigned,
586 // We can’t use `+= increment` because the counter is unsigned,
587 // and we want debug builds to detect accidental underflow
587 // and we want debug builds to detect accidental underflow
588 // through zero
588 // through zero
589 match tracked_count_increment {
589 match tracked_count_increment {
590 1 => ancestor.tracked_descendants_count += 1,
590 1 => ancestor.tracked_descendants_count += 1,
591 -1 => ancestor.tracked_descendants_count -= 1,
591 -1 => ancestor.tracked_descendants_count -= 1,
592 _ => {}
592 _ => {}
593 }
593 }
594 },
594 },
595 )?;
595 )?;
596 if !had_entry {
596 if !had_entry {
597 self.nodes_with_entry_count += 1
597 self.nodes_with_entry_count += 1
598 }
598 }
599 node.data = NodeData::Entry(new_entry);
599 node.data = NodeData::Entry(new_entry);
600 Ok(())
600 Ok(())
601 }
601 }
602
602
603 fn iter_nodes<'tree>(
603 fn iter_nodes<'tree>(
604 &'tree self,
604 &'tree self,
605 ) -> impl Iterator<
605 ) -> impl Iterator<
606 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
606 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
607 > + 'tree {
607 > + 'tree {
608 // Depth first tree traversal.
608 // Depth first tree traversal.
609 //
609 //
610 // If we could afford internal iteration and recursion,
610 // If we could afford internal iteration and recursion,
611 // this would look like:
611 // this would look like:
612 //
612 //
613 // ```
613 // ```
614 // fn traverse_children(
614 // fn traverse_children(
615 // children: &ChildNodes,
615 // children: &ChildNodes,
616 // each: &mut impl FnMut(&Node),
616 // each: &mut impl FnMut(&Node),
617 // ) {
617 // ) {
618 // for child in children.values() {
618 // for child in children.values() {
619 // traverse_children(&child.children, each);
619 // traverse_children(&child.children, each);
620 // each(child);
620 // each(child);
621 // }
621 // }
622 // }
622 // }
623 // ```
623 // ```
624 //
624 //
625 // However we want an external iterator and therefore can’t use the
625 // However we want an external iterator and therefore can’t use the
626 // call stack. Use an explicit stack instead:
626 // call stack. Use an explicit stack instead:
627 let mut stack = Vec::new();
627 let mut stack = Vec::new();
628 let mut iter = self.root.as_ref().iter();
628 let mut iter = self.root.as_ref().iter();
629 std::iter::from_fn(move || {
629 std::iter::from_fn(move || {
630 while let Some(child_node) = iter.next() {
630 while let Some(child_node) = iter.next() {
631 let children = match child_node.children(self.on_disk) {
631 let children = match child_node.children(self.on_disk) {
632 Ok(children) => children,
632 Ok(children) => children,
633 Err(error) => return Some(Err(error)),
633 Err(error) => return Some(Err(error)),
634 };
634 };
635 // Pseudo-recursion
635 // Pseudo-recursion
636 let new_iter = children.iter();
636 let new_iter = children.iter();
637 let old_iter = std::mem::replace(&mut iter, new_iter);
637 let old_iter = std::mem::replace(&mut iter, new_iter);
638 stack.push((child_node, old_iter));
638 stack.push((child_node, old_iter));
639 }
639 }
640 // Found the end of a `children.iter()` iterator.
640 // Found the end of a `children.iter()` iterator.
641 if let Some((child_node, next_iter)) = stack.pop() {
641 if let Some((child_node, next_iter)) = stack.pop() {
642 // "Return" from pseudo-recursion by restoring state from the
642 // "Return" from pseudo-recursion by restoring state from the
643 // explicit stack
643 // explicit stack
644 iter = next_iter;
644 iter = next_iter;
645
645
646 Some(Ok(child_node))
646 Some(Ok(child_node))
647 } else {
647 } else {
648 // Reached the bottom of the stack, we’re done
648 // Reached the bottom of the stack, we’re done
649 None
649 None
650 }
650 }
651 })
651 })
652 }
652 }
653
653
654 fn clear_known_ambiguous_mtimes(
654 fn clear_known_ambiguous_mtimes(
655 &mut self,
655 &mut self,
656 paths: &[impl AsRef<HgPath>],
656 paths: &[impl AsRef<HgPath>],
657 ) -> Result<(), DirstateV2ParseError> {
657 ) -> Result<(), DirstateV2ParseError> {
658 for path in paths {
658 for path in paths {
659 if let Some(node) = Self::get_node_mut(
659 if let Some(node) = Self::get_node_mut(
660 self.on_disk,
660 self.on_disk,
661 &mut self.root,
661 &mut self.root,
662 path.as_ref(),
662 path.as_ref(),
663 )? {
663 )? {
664 if let NodeData::Entry(entry) = &mut node.data {
664 if let NodeData::Entry(entry) = &mut node.data {
665 entry.clear_mtime();
665 entry.clear_mtime();
666 }
666 }
667 }
667 }
668 }
668 }
669 Ok(())
669 Ok(())
670 }
670 }
671
671
672 /// Return a faillilble iterator of full paths of nodes that have an
672 /// Return a faillilble iterator of full paths of nodes that have an
673 /// `entry` for which the given `predicate` returns true.
673 /// `entry` for which the given `predicate` returns true.
674 ///
674 ///
675 /// Fallibility means that each iterator item is a `Result`, which may
675 /// Fallibility means that each iterator item is a `Result`, which may
676 /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
676 /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
677 /// should only happen if Mercurial is buggy or a repository is corrupted.
677 /// should only happen if Mercurial is buggy or a repository is corrupted.
678 fn filter_full_paths<'tree>(
678 fn filter_full_paths<'tree>(
679 &'tree self,
679 &'tree self,
680 predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
680 predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
681 ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
681 ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
682 {
682 {
683 filter_map_results(self.iter_nodes(), move |node| {
683 filter_map_results(self.iter_nodes(), move |node| {
684 if let Some(entry) = node.entry()? {
684 if let Some(entry) = node.entry()? {
685 if predicate(&entry) {
685 if predicate(&entry) {
686 return Ok(Some(node.full_path(self.on_disk)?));
686 return Ok(Some(node.full_path(self.on_disk)?));
687 }
687 }
688 }
688 }
689 Ok(None)
689 Ok(None)
690 })
690 })
691 }
691 }
692 }
692 }
693
693
/// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
///
/// The callback is only called for incoming `Ok` values. Errors are passed
/// through as-is. In order to let it use the `?` operator the callback is
/// expected to return a `Result` of `Option`, instead of an `Option` of
/// `Result`.
fn filter_map_results<'a, I, F, A, B, E>(
    iter: I,
    f: F,
) -> impl Iterator<Item = Result<B, E>> + 'a
where
    I: Iterator<Item = Result<A, E>> + 'a,
    F: Fn(A) -> Result<Option<B>, E> + 'a,
{
    iter.filter_map(move |result| match result {
        // `transpose` turns the callback’s `Ok(None)` into `None`
        // (item skipped) and `Ok(Some(b))` / `Err(e)` into `Some(...)`.
        Ok(node) => f(node).transpose(),
        Err(e) => Some(Err(e)),
    })
}
713
713
714 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
714 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
715 fn clear(&mut self) {
715 fn clear(&mut self) {
716 self.root = Default::default();
716 self.root = Default::default();
717 self.nodes_with_entry_count = 0;
717 self.nodes_with_entry_count = 0;
718 self.nodes_with_copy_source_count = 0;
718 self.nodes_with_copy_source_count = 0;
719 }
719 }
720
720
721 fn add_file(
721 fn add_file(
722 &mut self,
722 &mut self,
723 filename: &HgPath,
723 filename: &HgPath,
724 entry: DirstateEntry,
724 entry: DirstateEntry,
725 added: bool,
725 added: bool,
726 merged: bool,
726 merged: bool,
727 from_p2: bool,
727 from_p2: bool,
728 possibly_dirty: bool,
728 possibly_dirty: bool,
729 ) -> Result<(), DirstateError> {
729 ) -> Result<(), DirstateError> {
730 let mut entry = entry;
730 let mut entry = entry;
731 if added {
731 if added {
732 assert!(!possibly_dirty);
732 assert!(!possibly_dirty);
733 assert!(!from_p2);
733 assert!(!from_p2);
734 entry.state = EntryState::Added;
734 entry.state = EntryState::Added;
735 entry.size = SIZE_NON_NORMAL;
735 entry.size = SIZE_NON_NORMAL;
736 entry.mtime = MTIME_UNSET;
736 entry.mtime = MTIME_UNSET;
737 } else if merged {
737 } else if merged {
738 assert!(!possibly_dirty);
738 assert!(!possibly_dirty);
739 assert!(!from_p2);
739 assert!(!from_p2);
740 entry.state = EntryState::Merged;
740 entry.state = EntryState::Merged;
741 entry.size = SIZE_FROM_OTHER_PARENT;
741 entry.size = SIZE_FROM_OTHER_PARENT;
742 entry.mtime = MTIME_UNSET;
742 entry.mtime = MTIME_UNSET;
743 } else if from_p2 {
743 } else if from_p2 {
744 assert!(!possibly_dirty);
744 assert!(!possibly_dirty);
745 entry.size = SIZE_FROM_OTHER_PARENT;
745 entry.size = SIZE_FROM_OTHER_PARENT;
746 entry.mtime = MTIME_UNSET;
746 entry.mtime = MTIME_UNSET;
747 } else if possibly_dirty {
747 } else if possibly_dirty {
748 entry.state = EntryState::Normal;
748 entry.size = SIZE_NON_NORMAL;
749 entry.size = SIZE_NON_NORMAL;
749 entry.mtime = MTIME_UNSET;
750 entry.mtime = MTIME_UNSET;
750 } else {
751 } else {
751 entry.size = entry.size & V1_RANGEMASK;
752 entry.size = entry.size & V1_RANGEMASK;
752 entry.mtime = entry.mtime & V1_RANGEMASK;
753 entry.mtime = entry.mtime & V1_RANGEMASK;
753 }
754 }
754
755
755 let old_state = match self.get(filename)? {
756 let old_state = match self.get(filename)? {
756 Some(e) => e.state,
757 Some(e) => e.state,
757 None => EntryState::Unknown,
758 None => EntryState::Unknown,
758 };
759 };
759
760
760 Ok(self.add_or_remove_file(filename, old_state, entry)?)
761 Ok(self.add_or_remove_file(filename, old_state, entry)?)
761 }
762 }
762
763
763 fn remove_file(
764 fn remove_file(
764 &mut self,
765 &mut self,
765 filename: &HgPath,
766 filename: &HgPath,
766 in_merge: bool,
767 in_merge: bool,
767 ) -> Result<(), DirstateError> {
768 ) -> Result<(), DirstateError> {
768 let old_entry_opt = self.get(filename)?;
769 let old_entry_opt = self.get(filename)?;
769 let old_state = match old_entry_opt {
770 let old_state = match old_entry_opt {
770 Some(e) => e.state,
771 Some(e) => e.state,
771 None => EntryState::Unknown,
772 None => EntryState::Unknown,
772 };
773 };
773 let mut size = 0;
774 let mut size = 0;
774 if in_merge {
775 if in_merge {
775 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
776 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
776 // during a merge. So I (marmoute) am not sure we need the
777 // during a merge. So I (marmoute) am not sure we need the
777 // conditionnal at all. Adding double checking this with assert
778 // conditionnal at all. Adding double checking this with assert
778 // would be nice.
779 // would be nice.
779 if let Some(old_entry) = old_entry_opt {
780 if let Some(old_entry) = old_entry_opt {
780 // backup the previous state
781 // backup the previous state
781 if old_entry.state == EntryState::Merged {
782 if old_entry.state == EntryState::Merged {
782 size = SIZE_NON_NORMAL;
783 size = SIZE_NON_NORMAL;
783 } else if old_entry.state == EntryState::Normal
784 } else if old_entry.state == EntryState::Normal
784 && old_entry.size == SIZE_FROM_OTHER_PARENT
785 && old_entry.size == SIZE_FROM_OTHER_PARENT
785 {
786 {
786 // other parent
787 // other parent
787 size = SIZE_FROM_OTHER_PARENT;
788 size = SIZE_FROM_OTHER_PARENT;
788 }
789 }
789 }
790 }
790 }
791 }
791 if size == 0 {
792 if size == 0 {
792 self.copy_map_remove(filename)?;
793 self.copy_map_remove(filename)?;
793 }
794 }
794 let entry = DirstateEntry {
795 let entry = DirstateEntry {
795 state: EntryState::Removed,
796 state: EntryState::Removed,
796 mode: 0,
797 mode: 0,
797 size,
798 size,
798 mtime: 0,
799 mtime: 0,
799 };
800 };
800 Ok(self.add_or_remove_file(filename, old_state, entry)?)
801 Ok(self.add_or_remove_file(filename, old_state, entry)?)
801 }
802 }
802
803
803 fn drop_file(
804 fn drop_file(
804 &mut self,
805 &mut self,
805 filename: &HgPath,
806 filename: &HgPath,
806 old_state: EntryState,
807 old_state: EntryState,
807 ) -> Result<bool, DirstateError> {
808 ) -> Result<bool, DirstateError> {
808 struct Dropped {
809 struct Dropped {
809 was_tracked: bool,
810 was_tracked: bool,
810 had_entry: bool,
811 had_entry: bool,
811 had_copy_source: bool,
812 had_copy_source: bool,
812 }
813 }
813
814
814 /// If this returns `Ok(Some((dropped, removed)))`, then
815 /// If this returns `Ok(Some((dropped, removed)))`, then
815 ///
816 ///
816 /// * `dropped` is about the leaf node that was at `filename`
817 /// * `dropped` is about the leaf node that was at `filename`
817 /// * `removed` is whether this particular level of recursion just
818 /// * `removed` is whether this particular level of recursion just
818 /// removed a node in `nodes`.
819 /// removed a node in `nodes`.
819 fn recur<'on_disk>(
820 fn recur<'on_disk>(
820 on_disk: &'on_disk [u8],
821 on_disk: &'on_disk [u8],
821 nodes: &mut ChildNodes<'on_disk>,
822 nodes: &mut ChildNodes<'on_disk>,
822 path: &HgPath,
823 path: &HgPath,
823 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
824 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
824 let (first_path_component, rest_of_path) =
825 let (first_path_component, rest_of_path) =
825 path.split_first_component();
826 path.split_first_component();
826 let node = if let Some(node) =
827 let node = if let Some(node) =
827 nodes.make_mut(on_disk)?.get_mut(first_path_component)
828 nodes.make_mut(on_disk)?.get_mut(first_path_component)
828 {
829 {
829 node
830 node
830 } else {
831 } else {
831 return Ok(None);
832 return Ok(None);
832 };
833 };
833 let dropped;
834 let dropped;
834 if let Some(rest) = rest_of_path {
835 if let Some(rest) = rest_of_path {
835 if let Some((d, removed)) =
836 if let Some((d, removed)) =
836 recur(on_disk, &mut node.children, rest)?
837 recur(on_disk, &mut node.children, rest)?
837 {
838 {
838 dropped = d;
839 dropped = d;
839 if dropped.had_entry {
840 if dropped.had_entry {
840 node.descendants_with_entry_count -= 1;
841 node.descendants_with_entry_count -= 1;
841 }
842 }
842 if dropped.was_tracked {
843 if dropped.was_tracked {
843 node.tracked_descendants_count -= 1;
844 node.tracked_descendants_count -= 1;
844 }
845 }
845
846
846 // Directory caches must be invalidated when removing a
847 // Directory caches must be invalidated when removing a
847 // child node
848 // child node
848 if removed {
849 if removed {
849 if let NodeData::CachedDirectory { .. } = &node.data {
850 if let NodeData::CachedDirectory { .. } = &node.data {
850 node.data = NodeData::None
851 node.data = NodeData::None
851 }
852 }
852 }
853 }
853 } else {
854 } else {
854 return Ok(None);
855 return Ok(None);
855 }
856 }
856 } else {
857 } else {
857 let had_entry = node.data.has_entry();
858 let had_entry = node.data.has_entry();
858 if had_entry {
859 if had_entry {
859 node.data = NodeData::None
860 node.data = NodeData::None
860 }
861 }
861 dropped = Dropped {
862 dropped = Dropped {
862 was_tracked: node
863 was_tracked: node
863 .data
864 .data
864 .as_entry()
865 .as_entry()
865 .map_or(false, |entry| entry.state.is_tracked()),
866 .map_or(false, |entry| entry.state.is_tracked()),
866 had_entry,
867 had_entry,
867 had_copy_source: node.copy_source.take().is_some(),
868 had_copy_source: node.copy_source.take().is_some(),
868 };
869 };
869 }
870 }
870 // After recursion, for both leaf (rest_of_path is None) nodes and
871 // After recursion, for both leaf (rest_of_path is None) nodes and
871 // parent nodes, remove a node if it just became empty.
872 // parent nodes, remove a node if it just became empty.
872 let remove = !node.data.has_entry()
873 let remove = !node.data.has_entry()
873 && node.copy_source.is_none()
874 && node.copy_source.is_none()
874 && node.children.is_empty();
875 && node.children.is_empty();
875 if remove {
876 if remove {
876 nodes.make_mut(on_disk)?.remove(first_path_component);
877 nodes.make_mut(on_disk)?.remove(first_path_component);
877 }
878 }
878 Ok(Some((dropped, remove)))
879 Ok(Some((dropped, remove)))
879 }
880 }
880
881
881 if let Some((dropped, _removed)) =
882 if let Some((dropped, _removed)) =
882 recur(self.on_disk, &mut self.root, filename)?
883 recur(self.on_disk, &mut self.root, filename)?
883 {
884 {
884 if dropped.had_entry {
885 if dropped.had_entry {
885 self.nodes_with_entry_count -= 1
886 self.nodes_with_entry_count -= 1
886 }
887 }
887 if dropped.had_copy_source {
888 if dropped.had_copy_source {
888 self.nodes_with_copy_source_count -= 1
889 self.nodes_with_copy_source_count -= 1
889 }
890 }
890 Ok(dropped.had_entry)
891 Ok(dropped.had_entry)
891 } else {
892 } else {
892 debug_assert!(!old_state.is_tracked());
893 debug_assert!(!old_state.is_tracked());
893 Ok(false)
894 Ok(false)
894 }
895 }
895 }
896 }
896
897
897 fn clear_ambiguous_times(
898 fn clear_ambiguous_times(
898 &mut self,
899 &mut self,
899 filenames: Vec<HgPathBuf>,
900 filenames: Vec<HgPathBuf>,
900 now: i32,
901 now: i32,
901 ) -> Result<(), DirstateV2ParseError> {
902 ) -> Result<(), DirstateV2ParseError> {
902 for filename in filenames {
903 for filename in filenames {
903 if let Some(node) =
904 if let Some(node) =
904 Self::get_node_mut(self.on_disk, &mut self.root, &filename)?
905 Self::get_node_mut(self.on_disk, &mut self.root, &filename)?
905 {
906 {
906 if let NodeData::Entry(entry) = &mut node.data {
907 if let NodeData::Entry(entry) = &mut node.data {
907 entry.clear_ambiguous_mtime(now);
908 entry.clear_ambiguous_mtime(now);
908 }
909 }
909 }
910 }
910 }
911 }
911 Ok(())
912 Ok(())
912 }
913 }
913
914
914 fn non_normal_entries_contains(
915 fn non_normal_entries_contains(
915 &mut self,
916 &mut self,
916 key: &HgPath,
917 key: &HgPath,
917 ) -> Result<bool, DirstateV2ParseError> {
918 ) -> Result<bool, DirstateV2ParseError> {
918 Ok(if let Some(node) = self.get_node(key)? {
919 Ok(if let Some(node) = self.get_node(key)? {
919 node.entry()?.map_or(false, |entry| entry.is_non_normal())
920 node.entry()?.map_or(false, |entry| entry.is_non_normal())
920 } else {
921 } else {
921 false
922 false
922 })
923 })
923 }
924 }
924
925
    /// Intentional no-op: this tree-based map computes "non normal"
    /// membership on demand from each entry, so there is no cached set
    /// to remove `_key` from.
    fn non_normal_entries_remove(&mut self, _key: &HgPath) {
        // Do nothing, this `DirstateMap` does not have a separate "non normal
        // entries" set that need to be kept up to date
    }
929
930
930 fn non_normal_or_other_parent_paths(
931 fn non_normal_or_other_parent_paths(
931 &mut self,
932 &mut self,
932 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
933 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
933 {
934 {
934 Box::new(self.filter_full_paths(|entry| {
935 Box::new(self.filter_full_paths(|entry| {
935 entry.is_non_normal() || entry.is_from_other_parent()
936 entry.is_non_normal() || entry.is_from_other_parent()
936 }))
937 }))
937 }
938 }
938
939
    /// Intentional no-op: `_force` is ignored because this map derives the
    /// "non normal" and "from other parent" sets on the fly rather than
    /// caching them.
    fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
        // Do nothing, this `DirstateMap` does not have a separate "non normal
        // entries" and "from other parent" sets that need to be recomputed
    }
943
944
    /// Iterate over the full paths of all "non normal" entries.
    ///
    /// The `&mut self` receiver is required by the trait; the work is
    /// delegated to the `&self` implementation below.
    fn iter_non_normal_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        self.iter_non_normal_paths_panic()
    }
951
952
    /// Iterate over the full paths of all "non normal" entries
    /// (shared-borrow variant of `iter_non_normal_paths`).
    fn iter_non_normal_paths_panic(
        &self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
    }
959
960
    /// Iterate over the full paths of all entries recorded as coming from
    /// the other merge parent.
    fn iter_other_parent_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
    }
967
968
968 fn has_tracked_dir(
969 fn has_tracked_dir(
969 &mut self,
970 &mut self,
970 directory: &HgPath,
971 directory: &HgPath,
971 ) -> Result<bool, DirstateError> {
972 ) -> Result<bool, DirstateError> {
972 if let Some(node) = self.get_node(directory)? {
973 if let Some(node) = self.get_node(directory)? {
973 // A node without a `DirstateEntry` was created to hold child
974 // A node without a `DirstateEntry` was created to hold child
974 // nodes, and is therefore a directory.
975 // nodes, and is therefore a directory.
975 let state = node.state()?;
976 let state = node.state()?;
976 Ok(state.is_none() && node.tracked_descendants_count() > 0)
977 Ok(state.is_none() && node.tracked_descendants_count() > 0)
977 } else {
978 } else {
978 Ok(false)
979 Ok(false)
979 }
980 }
980 }
981 }
981
982
982 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
983 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
983 if let Some(node) = self.get_node(directory)? {
984 if let Some(node) = self.get_node(directory)? {
984 // A node without a `DirstateEntry` was created to hold child
985 // A node without a `DirstateEntry` was created to hold child
985 // nodes, and is therefore a directory.
986 // nodes, and is therefore a directory.
986 let state = node.state()?;
987 let state = node.state()?;
987 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
988 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
988 } else {
989 } else {
989 Ok(false)
990 Ok(false)
990 }
991 }
991 }
992 }
992
993
    /// Serialize this map in dirstate-v1 format.
    ///
    /// Entries whose mtime equals `now` are cleared first via
    /// `clear_known_ambiguous_mtimes` — presumably so a later read cannot
    /// mistake a file modified within the same second for clean (NOTE:
    /// rationale inferred from the call, confirm against
    /// `clear_ambiguous_mtime`).
    #[timed]
    fn pack_v1(
        &mut self,
        parents: DirstateParents,
        now: Timestamp,
    ) -> Result<Vec<u8>, DirstateError> {
        let now: i32 = now.0.try_into().expect("time overflow");
        let mut ambiguous_mtimes = Vec::new();
        // Optimization (to be measured?): pre-compute size to avoid `Vec`
        // reallocations
        let mut size = parents.as_bytes().len();
        // First pass: accumulate the packed size and collect the paths
        // whose mtime is ambiguous with respect to `now`.
        for node in self.iter_nodes() {
            let node = node?;
            if let Some(entry) = node.entry()? {
                size += packed_entry_size(
                    node.full_path(self.on_disk)?,
                    node.copy_source(self.on_disk)?,
                );
                if entry.mtime_is_ambiguous(now) {
                    ambiguous_mtimes.push(
                        node.full_path_borrowed(self.on_disk)?
                            .detach_from_tree(),
                    )
                }
            }
        }
        // Clear the ambiguous mtimes before the serialization pass so the
        // cleared values are what gets written out.
        self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;

        let mut packed = Vec::with_capacity(size);
        packed.extend(parents.as_bytes());

        // Second pass: write the parents header followed by every entry.
        for node in self.iter_nodes() {
            let node = node?;
            if let Some(entry) = node.entry()? {
                pack_entry(
                    node.full_path(self.on_disk)?,
                    &entry,
                    node.copy_source(self.on_disk)?,
                    &mut packed,
                );
            }
        }
        Ok(packed)
    }
1037
1038
1038 #[timed]
1039 #[timed]
1039 fn pack_v2(
1040 fn pack_v2(
1040 &mut self,
1041 &mut self,
1041 parents: DirstateParents,
1042 parents: DirstateParents,
1042 now: Timestamp,
1043 now: Timestamp,
1043 ) -> Result<Vec<u8>, DirstateError> {
1044 ) -> Result<Vec<u8>, DirstateError> {
1044 // TODO:Β how do we want to handle this in 2038?
1045 // TODO:Β how do we want to handle this in 2038?
1045 let now: i32 = now.0.try_into().expect("time overflow");
1046 let now: i32 = now.0.try_into().expect("time overflow");
1046 let mut paths = Vec::new();
1047 let mut paths = Vec::new();
1047 for node in self.iter_nodes() {
1048 for node in self.iter_nodes() {
1048 let node = node?;
1049 let node = node?;
1049 if let Some(entry) = node.entry()? {
1050 if let Some(entry) = node.entry()? {
1050 if entry.mtime_is_ambiguous(now) {
1051 if entry.mtime_is_ambiguous(now) {
1051 paths.push(
1052 paths.push(
1052 node.full_path_borrowed(self.on_disk)?
1053 node.full_path_borrowed(self.on_disk)?
1053 .detach_from_tree(),
1054 .detach_from_tree(),
1054 )
1055 )
1055 }
1056 }
1056 }
1057 }
1057 }
1058 }
1058 // Borrow of `self` ends here since we collect cloned paths
1059 // Borrow of `self` ends here since we collect cloned paths
1059
1060
1060 self.clear_known_ambiguous_mtimes(&paths)?;
1061 self.clear_known_ambiguous_mtimes(&paths)?;
1061
1062
1062 on_disk::write(self, parents)
1063 on_disk::write(self, parents)
1063 }
1064 }
1064
1065
    /// Compute the status of the working directory at `root_dir` against
    /// this dirstate, restricted by `matcher` and honoring the given
    /// ignore files and options. Delegates to `super::status::status`.
    fn status<'a>(
        &'a mut self,
        matcher: &'a (dyn Matcher + Sync),
        root_dir: PathBuf,
        ignore_files: Vec<PathBuf>,
        options: StatusOptions,
    ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
    {
        super::status::status(self, matcher, root_dir, ignore_files, options)
    }
1075
1076
    /// Number of files that have a recorded copy source, from the
    /// counter maintained by `copy_map_insert`/`copy_map_remove`.
    fn copy_map_len(&self) -> usize {
        self.nodes_with_copy_source_count as usize
    }
1079
1080
1080 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1081 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1081 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1082 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1082 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1083 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1083 Some((node.full_path(self.on_disk)?, source))
1084 Some((node.full_path(self.on_disk)?, source))
1084 } else {
1085 } else {
1085 None
1086 None
1086 })
1087 })
1087 }))
1088 }))
1088 }
1089 }
1089
1090
1090 fn copy_map_contains_key(
1091 fn copy_map_contains_key(
1091 &self,
1092 &self,
1092 key: &HgPath,
1093 key: &HgPath,
1093 ) -> Result<bool, DirstateV2ParseError> {
1094 ) -> Result<bool, DirstateV2ParseError> {
1094 Ok(if let Some(node) = self.get_node(key)? {
1095 Ok(if let Some(node) = self.get_node(key)? {
1095 node.has_copy_source()
1096 node.has_copy_source()
1096 } else {
1097 } else {
1097 false
1098 false
1098 })
1099 })
1099 }
1100 }
1100
1101
1101 fn copy_map_get(
1102 fn copy_map_get(
1102 &self,
1103 &self,
1103 key: &HgPath,
1104 key: &HgPath,
1104 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1105 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1105 if let Some(node) = self.get_node(key)? {
1106 if let Some(node) = self.get_node(key)? {
1106 if let Some(source) = node.copy_source(self.on_disk)? {
1107 if let Some(source) = node.copy_source(self.on_disk)? {
1107 return Ok(Some(source));
1108 return Ok(Some(source));
1108 }
1109 }
1109 }
1110 }
1110 Ok(None)
1111 Ok(None)
1111 }
1112 }
1112
1113
    /// Remove and return the copy source recorded for `key`, if any,
    /// keeping `nodes_with_copy_source_count` in sync.
    fn copy_map_remove(
        &mut self,
        key: &HgPath,
    ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
        // Borrow the counter field separately up front so the closure
        // below can update it while `self.root` is mutably borrowed by
        // `get_node_mut`.
        let count = &mut self.nodes_with_copy_source_count;
        Ok(
            Self::get_node_mut(self.on_disk, &mut self.root, key)?.and_then(
                |node| {
                    // Decrement only when a source actually existed.
                    if node.copy_source.is_some() {
                        *count -= 1
                    }
                    node.copy_source.take().map(Cow::into_owned)
                },
            ),
        )
    }
1129
1130
1130 fn copy_map_insert(
1131 fn copy_map_insert(
1131 &mut self,
1132 &mut self,
1132 key: HgPathBuf,
1133 key: HgPathBuf,
1133 value: HgPathBuf,
1134 value: HgPathBuf,
1134 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1135 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1135 let node = Self::get_or_insert_node(
1136 let node = Self::get_or_insert_node(
1136 self.on_disk,
1137 self.on_disk,
1137 &mut self.root,
1138 &mut self.root,
1138 &key,
1139 &key,
1139 WithBasename::to_cow_owned,
1140 WithBasename::to_cow_owned,
1140 |_ancestor| {},
1141 |_ancestor| {},
1141 )?;
1142 )?;
1142 if node.copy_source.is_none() {
1143 if node.copy_source.is_none() {
1143 self.nodes_with_copy_source_count += 1
1144 self.nodes_with_copy_source_count += 1
1144 }
1145 }
1145 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1146 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1146 }
1147 }
1147
1148
    /// Number of nodes that carry an actual `DirstateEntry` (directory-only
    /// nodes are not counted).
    fn len(&self) -> usize {
        self.nodes_with_entry_count as usize
    }
1151
1152
1152 fn contains_key(
1153 fn contains_key(
1153 &self,
1154 &self,
1154 key: &HgPath,
1155 key: &HgPath,
1155 ) -> Result<bool, DirstateV2ParseError> {
1156 ) -> Result<bool, DirstateV2ParseError> {
1156 Ok(self.get(key)?.is_some())
1157 Ok(self.get(key)?.is_some())
1157 }
1158 }
1158
1159
1159 fn get(
1160 fn get(
1160 &self,
1161 &self,
1161 key: &HgPath,
1162 key: &HgPath,
1162 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1163 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1163 Ok(if let Some(node) = self.get_node(key)? {
1164 Ok(if let Some(node) = self.get_node(key)? {
1164 node.entry()?
1165 node.entry()?
1165 } else {
1166 } else {
1166 None
1167 None
1167 })
1168 })
1168 }
1169 }
1169
1170
1170 fn iter(&self) -> StateMapIter<'_> {
1171 fn iter(&self) -> StateMapIter<'_> {
1171 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1172 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1172 Ok(if let Some(entry) = node.entry()? {
1173 Ok(if let Some(entry) = node.entry()? {
1173 Some((node.full_path(self.on_disk)?, entry))
1174 Some((node.full_path(self.on_disk)?, entry))
1174 } else {
1175 } else {
1175 None
1176 None
1176 })
1177 })
1177 }))
1178 }))
1178 }
1179 }
1179
1180
1180 fn iter_directories(
1181 fn iter_directories(
1181 &self,
1182 &self,
1182 ) -> Box<
1183 ) -> Box<
1183 dyn Iterator<
1184 dyn Iterator<
1184 Item = Result<
1185 Item = Result<
1185 (&HgPath, Option<Timestamp>),
1186 (&HgPath, Option<Timestamp>),
1186 DirstateV2ParseError,
1187 DirstateV2ParseError,
1187 >,
1188 >,
1188 > + Send
1189 > + Send
1189 + '_,
1190 + '_,
1190 > {
1191 > {
1191 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1192 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1192 Ok(if node.state()?.is_none() {
1193 Ok(if node.state()?.is_none() {
1193 Some((
1194 Some((
1194 node.full_path(self.on_disk)?,
1195 node.full_path(self.on_disk)?,
1195 node.cached_directory_mtime()
1196 node.cached_directory_mtime()
1196 .map(|mtime| Timestamp(mtime.seconds())),
1197 .map(|mtime| Timestamp(mtime.seconds())),
1197 ))
1198 ))
1198 } else {
1199 } else {
1199 None
1200 None
1200 })
1201 })
1201 }))
1202 }))
1202 }
1203 }
1203 }
1204 }
General Comments 0
You need to be logged in to leave comments. Login now