##// END OF EJS Templates
dirstate: infer the 'n' state from `from_p2`...
marmoute -
r48318:d3cf2032 default
parent child Browse files
Show More
@@ -1,1439 +1,1439 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 dirstatetuple = parsers.dirstatetuple
48 dirstatetuple = parsers.dirstatetuple
49
49
50
50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # Paths are resolved through the repository's .hg/ opener.
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # Paths are resolved relative to the working-directory root.
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
75 @interfaceutil.implementer(intdirstate.idirstate)
75 @interfaceutil.implementer(intdirstate.idirstate)
76 class dirstate(object):
76 class dirstate(object):
77 def __init__(
77 def __init__(
78 self,
78 self,
79 opener,
79 opener,
80 ui,
80 ui,
81 root,
81 root,
82 validate,
82 validate,
83 sparsematchfn,
83 sparsematchfn,
84 nodeconstants,
84 nodeconstants,
85 use_dirstate_v2,
85 use_dirstate_v2,
86 ):
86 ):
87 """Create a new dirstate object.
87 """Create a new dirstate object.
88
88
89 opener is an open()-like callable that can be used to open the
89 opener is an open()-like callable that can be used to open the
90 dirstate file; root is the root of the directory tracked by
90 dirstate file; root is the root of the directory tracked by
91 the dirstate.
91 the dirstate.
92 """
92 """
93 self._use_dirstate_v2 = use_dirstate_v2
93 self._use_dirstate_v2 = use_dirstate_v2
94 self._nodeconstants = nodeconstants
94 self._nodeconstants = nodeconstants
95 self._opener = opener
95 self._opener = opener
96 self._validate = validate
96 self._validate = validate
97 self._root = root
97 self._root = root
98 self._sparsematchfn = sparsematchfn
98 self._sparsematchfn = sparsematchfn
99 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
99 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
100 # UNC path pointing to root share (issue4557)
100 # UNC path pointing to root share (issue4557)
101 self._rootdir = pathutil.normasprefix(root)
101 self._rootdir = pathutil.normasprefix(root)
102 self._dirty = False
102 self._dirty = False
103 self._lastnormaltime = 0
103 self._lastnormaltime = 0
104 self._ui = ui
104 self._ui = ui
105 self._filecache = {}
105 self._filecache = {}
106 self._parentwriters = 0
106 self._parentwriters = 0
107 self._filename = b'dirstate'
107 self._filename = b'dirstate'
108 self._pendingfilename = b'%s.pending' % self._filename
108 self._pendingfilename = b'%s.pending' % self._filename
109 self._plchangecallbacks = {}
109 self._plchangecallbacks = {}
110 self._origpl = None
110 self._origpl = None
111 self._updatedfiles = set()
111 self._updatedfiles = set()
112 self._mapcls = dirstatemap.dirstatemap
112 self._mapcls = dirstatemap.dirstatemap
113 # Access and cache cwd early, so we don't access it for the first time
113 # Access and cache cwd early, so we don't access it for the first time
114 # after a working-copy update caused it to not exist (accessing it then
114 # after a working-copy update caused it to not exist (accessing it then
115 # raises an exception).
115 # raises an exception).
116 self._cwd
116 self._cwd
117
117
118 def prefetch_parents(self):
118 def prefetch_parents(self):
119 """make sure the parents are loaded
119 """make sure the parents are loaded
120
120
121 Used to avoid a race condition.
121 Used to avoid a race condition.
122 """
122 """
123 self._pl
123 self._pl
124
124
125 @contextlib.contextmanager
125 @contextlib.contextmanager
126 def parentchange(self):
126 def parentchange(self):
127 """Context manager for handling dirstate parents.
127 """Context manager for handling dirstate parents.
128
128
129 If an exception occurs in the scope of the context manager,
129 If an exception occurs in the scope of the context manager,
130 the incoherent dirstate won't be written when wlock is
130 the incoherent dirstate won't be written when wlock is
131 released.
131 released.
132 """
132 """
133 self._parentwriters += 1
133 self._parentwriters += 1
134 yield
134 yield
135 # Typically we want the "undo" step of a context manager in a
135 # Typically we want the "undo" step of a context manager in a
136 # finally block so it happens even when an exception
136 # finally block so it happens even when an exception
137 # occurs. In this case, however, we only want to decrement
137 # occurs. In this case, however, we only want to decrement
138 # parentwriters if the code in the with statement exits
138 # parentwriters if the code in the with statement exits
139 # normally, so we don't have a try/finally here on purpose.
139 # normally, so we don't have a try/finally here on purpose.
140 self._parentwriters -= 1
140 self._parentwriters -= 1
141
141
142 def pendingparentchange(self):
142 def pendingparentchange(self):
143 """Returns true if the dirstate is in the middle of a set of changes
143 """Returns true if the dirstate is in the middle of a set of changes
144 that modify the dirstate parent.
144 that modify the dirstate parent.
145 """
145 """
146 return self._parentwriters > 0
146 return self._parentwriters > 0
147
147
148 @propertycache
148 @propertycache
149 def _map(self):
149 def _map(self):
150 """Return the dirstate contents (see documentation for dirstatemap)."""
150 """Return the dirstate contents (see documentation for dirstatemap)."""
151 self._map = self._mapcls(
151 self._map = self._mapcls(
152 self._ui,
152 self._ui,
153 self._opener,
153 self._opener,
154 self._root,
154 self._root,
155 self._nodeconstants,
155 self._nodeconstants,
156 self._use_dirstate_v2,
156 self._use_dirstate_v2,
157 )
157 )
158 return self._map
158 return self._map
159
159
160 @property
160 @property
161 def _sparsematcher(self):
161 def _sparsematcher(self):
162 """The matcher for the sparse checkout.
162 """The matcher for the sparse checkout.
163
163
164 The working directory may not include every file from a manifest. The
164 The working directory may not include every file from a manifest. The
165 matcher obtained by this property will match a path if it is to be
165 matcher obtained by this property will match a path if it is to be
166 included in the working directory.
166 included in the working directory.
167 """
167 """
168 # TODO there is potential to cache this property. For now, the matcher
168 # TODO there is potential to cache this property. For now, the matcher
169 # is resolved on every access. (But the called function does use a
169 # is resolved on every access. (But the called function does use a
170 # cache to keep the lookup fast.)
170 # cache to keep the lookup fast.)
171 return self._sparsematchfn()
171 return self._sparsematchfn()
172
172
173 @repocache(b'branch')
173 @repocache(b'branch')
174 def _branch(self):
174 def _branch(self):
175 try:
175 try:
176 return self._opener.read(b"branch").strip() or b"default"
176 return self._opener.read(b"branch").strip() or b"default"
177 except IOError as inst:
177 except IOError as inst:
178 if inst.errno != errno.ENOENT:
178 if inst.errno != errno.ENOENT:
179 raise
179 raise
180 return b"default"
180 return b"default"
181
181
182 @property
182 @property
183 def _pl(self):
183 def _pl(self):
184 return self._map.parents()
184 return self._map.parents()
185
185
186 def hasdir(self, d):
186 def hasdir(self, d):
187 return self._map.hastrackeddir(d)
187 return self._map.hastrackeddir(d)
188
188
189 @rootcache(b'.hgignore')
189 @rootcache(b'.hgignore')
190 def _ignore(self):
190 def _ignore(self):
191 files = self._ignorefiles()
191 files = self._ignorefiles()
192 if not files:
192 if not files:
193 return matchmod.never()
193 return matchmod.never()
194
194
195 pats = [b'include:%s' % f for f in files]
195 pats = [b'include:%s' % f for f in files]
196 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
196 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
197
197
198 @propertycache
198 @propertycache
199 def _slash(self):
199 def _slash(self):
200 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
200 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
201
201
202 @propertycache
202 @propertycache
203 def _checklink(self):
203 def _checklink(self):
204 return util.checklink(self._root)
204 return util.checklink(self._root)
205
205
206 @propertycache
206 @propertycache
207 def _checkexec(self):
207 def _checkexec(self):
208 return bool(util.checkexec(self._root))
208 return bool(util.checkexec(self._root))
209
209
210 @propertycache
210 @propertycache
211 def _checkcase(self):
211 def _checkcase(self):
212 return not util.fscasesensitive(self._join(b'.hg'))
212 return not util.fscasesensitive(self._join(b'.hg'))
213
213
214 def _join(self, f):
214 def _join(self, f):
215 # much faster than os.path.join()
215 # much faster than os.path.join()
216 # it's safe because f is always a relative path
216 # it's safe because f is always a relative path
217 return self._rootdir + f
217 return self._rootdir + f
218
218
219 def flagfunc(self, buildfallback):
219 def flagfunc(self, buildfallback):
220 if self._checklink and self._checkexec:
220 if self._checklink and self._checkexec:
221
221
222 def f(x):
222 def f(x):
223 try:
223 try:
224 st = os.lstat(self._join(x))
224 st = os.lstat(self._join(x))
225 if util.statislink(st):
225 if util.statislink(st):
226 return b'l'
226 return b'l'
227 if util.statisexec(st):
227 if util.statisexec(st):
228 return b'x'
228 return b'x'
229 except OSError:
229 except OSError:
230 pass
230 pass
231 return b''
231 return b''
232
232
233 return f
233 return f
234
234
235 fallback = buildfallback()
235 fallback = buildfallback()
236 if self._checklink:
236 if self._checklink:
237
237
238 def f(x):
238 def f(x):
239 if os.path.islink(self._join(x)):
239 if os.path.islink(self._join(x)):
240 return b'l'
240 return b'l'
241 if b'x' in fallback(x):
241 if b'x' in fallback(x):
242 return b'x'
242 return b'x'
243 return b''
243 return b''
244
244
245 return f
245 return f
246 if self._checkexec:
246 if self._checkexec:
247
247
248 def f(x):
248 def f(x):
249 if b'l' in fallback(x):
249 if b'l' in fallback(x):
250 return b'l'
250 return b'l'
251 if util.isexec(self._join(x)):
251 if util.isexec(self._join(x)):
252 return b'x'
252 return b'x'
253 return b''
253 return b''
254
254
255 return f
255 return f
256 else:
256 else:
257 return fallback
257 return fallback
258
258
259 @propertycache
259 @propertycache
260 def _cwd(self):
260 def _cwd(self):
261 # internal config: ui.forcecwd
261 # internal config: ui.forcecwd
262 forcecwd = self._ui.config(b'ui', b'forcecwd')
262 forcecwd = self._ui.config(b'ui', b'forcecwd')
263 if forcecwd:
263 if forcecwd:
264 return forcecwd
264 return forcecwd
265 return encoding.getcwd()
265 return encoding.getcwd()
266
266
267 def getcwd(self):
267 def getcwd(self):
268 """Return the path from which a canonical path is calculated.
268 """Return the path from which a canonical path is calculated.
269
269
270 This path should be used to resolve file patterns or to convert
270 This path should be used to resolve file patterns or to convert
271 canonical paths back to file paths for display. It shouldn't be
271 canonical paths back to file paths for display. It shouldn't be
272 used to get real file paths. Use vfs functions instead.
272 used to get real file paths. Use vfs functions instead.
273 """
273 """
274 cwd = self._cwd
274 cwd = self._cwd
275 if cwd == self._root:
275 if cwd == self._root:
276 return b''
276 return b''
277 # self._root ends with a path separator if self._root is '/' or 'C:\'
277 # self._root ends with a path separator if self._root is '/' or 'C:\'
278 rootsep = self._root
278 rootsep = self._root
279 if not util.endswithsep(rootsep):
279 if not util.endswithsep(rootsep):
280 rootsep += pycompat.ossep
280 rootsep += pycompat.ossep
281 if cwd.startswith(rootsep):
281 if cwd.startswith(rootsep):
282 return cwd[len(rootsep) :]
282 return cwd[len(rootsep) :]
283 else:
283 else:
284 # we're outside the repo. return an absolute path.
284 # we're outside the repo. return an absolute path.
285 return cwd
285 return cwd
286
286
287 def pathto(self, f, cwd=None):
287 def pathto(self, f, cwd=None):
288 if cwd is None:
288 if cwd is None:
289 cwd = self.getcwd()
289 cwd = self.getcwd()
290 path = util.pathto(self._root, cwd, f)
290 path = util.pathto(self._root, cwd, f)
291 if self._slash:
291 if self._slash:
292 return util.pconvert(path)
292 return util.pconvert(path)
293 return path
293 return path
294
294
295 def __getitem__(self, key):
295 def __getitem__(self, key):
296 """Return the current state of key (a filename) in the dirstate.
296 """Return the current state of key (a filename) in the dirstate.
297
297
298 States are:
298 States are:
299 n normal
299 n normal
300 m needs merging
300 m needs merging
301 r marked for removal
301 r marked for removal
302 a marked for addition
302 a marked for addition
303 ? not tracked
303 ? not tracked
304
304
305 XXX The "state" is a bit obscure to be in the "public" API. we should
305 XXX The "state" is a bit obscure to be in the "public" API. we should
306 consider migrating all user of this to going through the dirstate entry
306 consider migrating all user of this to going through the dirstate entry
307 instead.
307 instead.
308 """
308 """
309 entry = self._map.get(key)
309 entry = self._map.get(key)
310 if entry is not None:
310 if entry is not None:
311 return entry.state
311 return entry.state
312 return b'?'
312 return b'?'
313
313
314 def __contains__(self, key):
314 def __contains__(self, key):
315 return key in self._map
315 return key in self._map
316
316
317 def __iter__(self):
317 def __iter__(self):
318 return iter(sorted(self._map))
318 return iter(sorted(self._map))
319
319
320 def items(self):
320 def items(self):
321 return pycompat.iteritems(self._map)
321 return pycompat.iteritems(self._map)
322
322
323 iteritems = items
323 iteritems = items
324
324
325 def directories(self):
325 def directories(self):
326 return self._map.directories()
326 return self._map.directories()
327
327
328 def parents(self):
328 def parents(self):
329 return [self._validate(p) for p in self._pl]
329 return [self._validate(p) for p in self._pl]
330
330
331 def p1(self):
331 def p1(self):
332 return self._validate(self._pl[0])
332 return self._validate(self._pl[0])
333
333
334 def p2(self):
334 def p2(self):
335 return self._validate(self._pl[1])
335 return self._validate(self._pl[1])
336
336
337 @property
337 @property
338 def in_merge(self):
338 def in_merge(self):
339 """True if a merge is in progress"""
339 """True if a merge is in progress"""
340 return self._pl[1] != self._nodeconstants.nullid
340 return self._pl[1] != self._nodeconstants.nullid
341
341
342 def branch(self):
342 def branch(self):
343 return encoding.tolocal(self._branch)
343 return encoding.tolocal(self._branch)
344
344
345 def setparents(self, p1, p2=None):
345 def setparents(self, p1, p2=None):
346 """Set dirstate parents to p1 and p2.
346 """Set dirstate parents to p1 and p2.
347
347
348 When moving from two parents to one, "merged" entries a
348 When moving from two parents to one, "merged" entries a
349 adjusted to normal and previous copy records discarded and
349 adjusted to normal and previous copy records discarded and
350 returned by the call.
350 returned by the call.
351
351
352 See localrepo.setparents()
352 See localrepo.setparents()
353 """
353 """
354 if p2 is None:
354 if p2 is None:
355 p2 = self._nodeconstants.nullid
355 p2 = self._nodeconstants.nullid
356 if self._parentwriters == 0:
356 if self._parentwriters == 0:
357 raise ValueError(
357 raise ValueError(
358 b"cannot set dirstate parent outside of "
358 b"cannot set dirstate parent outside of "
359 b"dirstate.parentchange context manager"
359 b"dirstate.parentchange context manager"
360 )
360 )
361
361
362 self._dirty = True
362 self._dirty = True
363 oldp2 = self._pl[1]
363 oldp2 = self._pl[1]
364 if self._origpl is None:
364 if self._origpl is None:
365 self._origpl = self._pl
365 self._origpl = self._pl
366 self._map.setparents(p1, p2)
366 self._map.setparents(p1, p2)
367 copies = {}
367 copies = {}
368 if (
368 if (
369 oldp2 != self._nodeconstants.nullid
369 oldp2 != self._nodeconstants.nullid
370 and p2 == self._nodeconstants.nullid
370 and p2 == self._nodeconstants.nullid
371 ):
371 ):
372 candidatefiles = self._map.non_normal_or_other_parent_paths()
372 candidatefiles = self._map.non_normal_or_other_parent_paths()
373
373
374 for f in candidatefiles:
374 for f in candidatefiles:
375 s = self._map.get(f)
375 s = self._map.get(f)
376 if s is None:
376 if s is None:
377 continue
377 continue
378
378
379 # Discard "merged" markers when moving away from a merge state
379 # Discard "merged" markers when moving away from a merge state
380 if s.merged:
380 if s.merged:
381 source = self._map.copymap.get(f)
381 source = self._map.copymap.get(f)
382 if source:
382 if source:
383 copies[f] = source
383 copies[f] = source
384 self.normallookup(f)
384 self.normallookup(f)
385 # Also fix up otherparent markers
385 # Also fix up otherparent markers
386 elif s.from_p2:
386 elif s.from_p2:
387 source = self._map.copymap.get(f)
387 source = self._map.copymap.get(f)
388 if source:
388 if source:
389 copies[f] = source
389 copies[f] = source
390 self.add(f)
390 self.add(f)
391 return copies
391 return copies
392
392
393 def setbranch(self, branch):
393 def setbranch(self, branch):
394 self.__class__._branch.set(self, encoding.fromlocal(branch))
394 self.__class__._branch.set(self, encoding.fromlocal(branch))
395 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
395 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
396 try:
396 try:
397 f.write(self._branch + b'\n')
397 f.write(self._branch + b'\n')
398 f.close()
398 f.close()
399
399
400 # make sure filecache has the correct stat info for _branch after
400 # make sure filecache has the correct stat info for _branch after
401 # replacing the underlying file
401 # replacing the underlying file
402 ce = self._filecache[b'_branch']
402 ce = self._filecache[b'_branch']
403 if ce:
403 if ce:
404 ce.refresh()
404 ce.refresh()
405 except: # re-raises
405 except: # re-raises
406 f.discard()
406 f.discard()
407 raise
407 raise
408
408
409 def invalidate(self):
409 def invalidate(self):
410 """Causes the next access to reread the dirstate.
410 """Causes the next access to reread the dirstate.
411
411
412 This is different from localrepo.invalidatedirstate() because it always
412 This is different from localrepo.invalidatedirstate() because it always
413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
414 check whether the dirstate has changed before rereading it."""
414 check whether the dirstate has changed before rereading it."""
415
415
416 for a in ("_map", "_branch", "_ignore"):
416 for a in ("_map", "_branch", "_ignore"):
417 if a in self.__dict__:
417 if a in self.__dict__:
418 delattr(self, a)
418 delattr(self, a)
419 self._lastnormaltime = 0
419 self._lastnormaltime = 0
420 self._dirty = False
420 self._dirty = False
421 self._updatedfiles.clear()
421 self._updatedfiles.clear()
422 self._parentwriters = 0
422 self._parentwriters = 0
423 self._origpl = None
423 self._origpl = None
424
424
425 def copy(self, source, dest):
425 def copy(self, source, dest):
426 """Mark dest as a copy of source. Unmark dest if source is None."""
426 """Mark dest as a copy of source. Unmark dest if source is None."""
427 if source == dest:
427 if source == dest:
428 return
428 return
429 self._dirty = True
429 self._dirty = True
430 if source is not None:
430 if source is not None:
431 self._map.copymap[dest] = source
431 self._map.copymap[dest] = source
432 self._updatedfiles.add(source)
432 self._updatedfiles.add(source)
433 self._updatedfiles.add(dest)
433 self._updatedfiles.add(dest)
434 elif self._map.copymap.pop(dest, None):
434 elif self._map.copymap.pop(dest, None):
435 self._updatedfiles.add(dest)
435 self._updatedfiles.add(dest)
436
436
437 def copied(self, file):
437 def copied(self, file):
438 return self._map.copymap.get(file, None)
438 return self._map.copymap.get(file, None)
439
439
440 def copies(self):
440 def copies(self):
441 return self._map.copymap
441 return self._map.copymap
442
442
443 def _addpath(
443 def _addpath(
444 self,
444 self,
445 f,
445 f,
446 state=None,
446 state=None,
447 mode=0,
447 mode=0,
448 size=None,
448 size=None,
449 mtime=None,
449 mtime=None,
450 added=False,
450 added=False,
451 merged=False,
451 merged=False,
452 from_p2=False,
452 from_p2=False,
453 possibly_dirty=False,
453 possibly_dirty=False,
454 ):
454 ):
455 entry = self._map.get(f)
455 entry = self._map.get(f)
456 if added or entry is not None and entry.removed:
456 if added or entry is not None and entry.removed:
457 scmutil.checkfilename(f)
457 scmutil.checkfilename(f)
458 if self._map.hastrackeddir(f):
458 if self._map.hastrackeddir(f):
459 msg = _(b'directory %r already in dirstate')
459 msg = _(b'directory %r already in dirstate')
460 msg %= pycompat.bytestr(f)
460 msg %= pycompat.bytestr(f)
461 raise error.Abort(msg)
461 raise error.Abort(msg)
462 # shadows
462 # shadows
463 for d in pathutil.finddirs(f):
463 for d in pathutil.finddirs(f):
464 if self._map.hastrackeddir(d):
464 if self._map.hastrackeddir(d):
465 break
465 break
466 entry = self._map.get(d)
466 entry = self._map.get(d)
467 if entry is not None and not entry.removed:
467 if entry is not None and not entry.removed:
468 msg = _(b'file %r in dirstate clashes with %r')
468 msg = _(b'file %r in dirstate clashes with %r')
469 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
469 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
470 raise error.Abort(msg)
470 raise error.Abort(msg)
471 self._dirty = True
471 self._dirty = True
472 self._updatedfiles.add(f)
472 self._updatedfiles.add(f)
473 self._map.addfile(
473 self._map.addfile(
474 f,
474 f,
475 state=state,
475 state=state,
476 mode=mode,
476 mode=mode,
477 size=size,
477 size=size,
478 mtime=mtime,
478 mtime=mtime,
479 added=added,
479 added=added,
480 merged=merged,
480 merged=merged,
481 from_p2=from_p2,
481 from_p2=from_p2,
482 possibly_dirty=possibly_dirty,
482 possibly_dirty=possibly_dirty,
483 )
483 )
484
484
485 def normal(self, f, parentfiledata=None):
485 def normal(self, f, parentfiledata=None):
486 """Mark a file normal and clean.
486 """Mark a file normal and clean.
487
487
488 parentfiledata: (mode, size, mtime) of the clean file
488 parentfiledata: (mode, size, mtime) of the clean file
489
489
490 parentfiledata should be computed from memory (for mode,
490 parentfiledata should be computed from memory (for mode,
491 size), as or close as possible from the point where we
491 size), as or close as possible from the point where we
492 determined the file was clean, to limit the risk of the
492 determined the file was clean, to limit the risk of the
493 file having been changed by an external process between the
493 file having been changed by an external process between the
494 moment where the file was determined to be clean and now."""
494 moment where the file was determined to be clean and now."""
495 if parentfiledata:
495 if parentfiledata:
496 (mode, size, mtime) = parentfiledata
496 (mode, size, mtime) = parentfiledata
497 else:
497 else:
498 s = os.lstat(self._join(f))
498 s = os.lstat(self._join(f))
499 mode = s.st_mode
499 mode = s.st_mode
500 size = s.st_size
500 size = s.st_size
501 mtime = s[stat.ST_MTIME]
501 mtime = s[stat.ST_MTIME]
502 self._addpath(f, b'n', mode, size, mtime)
502 self._addpath(f, b'n', mode, size, mtime)
503 self._map.copymap.pop(f, None)
503 self._map.copymap.pop(f, None)
504 if f in self._map.nonnormalset:
504 if f in self._map.nonnormalset:
505 self._map.nonnormalset.remove(f)
505 self._map.nonnormalset.remove(f)
506 if mtime > self._lastnormaltime:
506 if mtime > self._lastnormaltime:
507 # Remember the most recent modification timeslot for status(),
507 # Remember the most recent modification timeslot for status(),
508 # to make sure we won't miss future size-preserving file content
508 # to make sure we won't miss future size-preserving file content
509 # modifications that happen within the same timeslot.
509 # modifications that happen within the same timeslot.
510 self._lastnormaltime = mtime
510 self._lastnormaltime = mtime
511
511
512 def normallookup(self, f):
512 def normallookup(self, f):
513 '''Mark a file normal, but possibly dirty.'''
513 '''Mark a file normal, but possibly dirty.'''
514 if self.in_merge:
514 if self.in_merge:
515 # if there is a merge going on and the file was either
515 # if there is a merge going on and the file was either
516 # "merged" or coming from other parent (-2) before
516 # "merged" or coming from other parent (-2) before
517 # being removed, restore that state.
517 # being removed, restore that state.
518 entry = self._map.get(f)
518 entry = self._map.get(f)
519 if entry is not None:
519 if entry is not None:
520 # XXX this should probably be dealt with a a lower level
520 # XXX this should probably be dealt with a a lower level
521 # (see `merged_removed` and `from_p2_removed`)
521 # (see `merged_removed` and `from_p2_removed`)
522 if entry.merged_removed or entry.from_p2_removed:
522 if entry.merged_removed or entry.from_p2_removed:
523 source = self._map.copymap.get(f)
523 source = self._map.copymap.get(f)
524 if entry.merged_removed:
524 if entry.merged_removed:
525 self.merge(f)
525 self.merge(f)
526 elif entry.from_p2_removed:
526 elif entry.from_p2_removed:
527 self.otherparent(f)
527 self.otherparent(f)
528 if source is not None:
528 if source is not None:
529 self.copy(source, f)
529 self.copy(source, f)
530 return
530 return
531 elif entry.merged or entry.from_p2:
531 elif entry.merged or entry.from_p2:
532 return
532 return
533 self._addpath(f, possibly_dirty=True)
533 self._addpath(f, possibly_dirty=True)
534 self._map.copymap.pop(f, None)
534 self._map.copymap.pop(f, None)
535
535
536 def otherparent(self, f):
536 def otherparent(self, f):
537 '''Mark as coming from the other parent, always dirty.'''
537 '''Mark as coming from the other parent, always dirty.'''
538 if not self.in_merge:
538 if not self.in_merge:
539 msg = _(b"setting %r to other parent only allowed in merges") % f
539 msg = _(b"setting %r to other parent only allowed in merges") % f
540 raise error.Abort(msg)
540 raise error.Abort(msg)
541 if f in self and self[f] == b'n':
541 if f in self and self[f] == b'n':
542 # merge-like
542 # merge-like
543 self._addpath(f, merged=True)
543 self._addpath(f, merged=True)
544 else:
544 else:
545 # add-like
545 # add-like
546 self._addpath(f, b'n', 0, from_p2=True)
546 self._addpath(f, from_p2=True)
547 self._map.copymap.pop(f, None)
547 self._map.copymap.pop(f, None)
548
548
549 def add(self, f):
549 def add(self, f):
550 '''Mark a file added.'''
550 '''Mark a file added.'''
551 self._addpath(f, added=True)
551 self._addpath(f, added=True)
552 self._map.copymap.pop(f, None)
552 self._map.copymap.pop(f, None)
553
553
554 def remove(self, f):
554 def remove(self, f):
555 '''Mark a file removed.'''
555 '''Mark a file removed.'''
556 self._dirty = True
556 self._dirty = True
557 self._updatedfiles.add(f)
557 self._updatedfiles.add(f)
558 self._map.removefile(f, in_merge=self.in_merge)
558 self._map.removefile(f, in_merge=self.in_merge)
559
559
560 def merge(self, f):
560 def merge(self, f):
561 '''Mark a file merged.'''
561 '''Mark a file merged.'''
562 if not self.in_merge:
562 if not self.in_merge:
563 return self.normallookup(f)
563 return self.normallookup(f)
564 return self.otherparent(f)
564 return self.otherparent(f)
565
565
566 def drop(self, f):
566 def drop(self, f):
567 '''Drop a file from the dirstate'''
567 '''Drop a file from the dirstate'''
568 oldstate = self[f]
568 oldstate = self[f]
569 if self._map.dropfile(f, oldstate):
569 if self._map.dropfile(f, oldstate):
570 self._dirty = True
570 self._dirty = True
571 self._updatedfiles.add(f)
571 self._updatedfiles.add(f)
572 self._map.copymap.pop(f, None)
572 self._map.copymap.pop(f, None)
573
573
574 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
574 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
575 if exists is None:
575 if exists is None:
576 exists = os.path.lexists(os.path.join(self._root, path))
576 exists = os.path.lexists(os.path.join(self._root, path))
577 if not exists:
577 if not exists:
578 # Maybe a path component exists
578 # Maybe a path component exists
579 if not ignoremissing and b'/' in path:
579 if not ignoremissing and b'/' in path:
580 d, f = path.rsplit(b'/', 1)
580 d, f = path.rsplit(b'/', 1)
581 d = self._normalize(d, False, ignoremissing, None)
581 d = self._normalize(d, False, ignoremissing, None)
582 folded = d + b"/" + f
582 folded = d + b"/" + f
583 else:
583 else:
584 # No path components, preserve original case
584 # No path components, preserve original case
585 folded = path
585 folded = path
586 else:
586 else:
587 # recursively normalize leading directory components
587 # recursively normalize leading directory components
588 # against dirstate
588 # against dirstate
589 if b'/' in normed:
589 if b'/' in normed:
590 d, f = normed.rsplit(b'/', 1)
590 d, f = normed.rsplit(b'/', 1)
591 d = self._normalize(d, False, ignoremissing, True)
591 d = self._normalize(d, False, ignoremissing, True)
592 r = self._root + b"/" + d
592 r = self._root + b"/" + d
593 folded = d + b"/" + util.fspath(f, r)
593 folded = d + b"/" + util.fspath(f, r)
594 else:
594 else:
595 folded = util.fspath(normed, self._root)
595 folded = util.fspath(normed, self._root)
596 storemap[normed] = folded
596 storemap[normed] = folded
597
597
598 return folded
598 return folded
599
599
600 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
600 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
601 normed = util.normcase(path)
601 normed = util.normcase(path)
602 folded = self._map.filefoldmap.get(normed, None)
602 folded = self._map.filefoldmap.get(normed, None)
603 if folded is None:
603 if folded is None:
604 if isknown:
604 if isknown:
605 folded = path
605 folded = path
606 else:
606 else:
607 folded = self._discoverpath(
607 folded = self._discoverpath(
608 path, normed, ignoremissing, exists, self._map.filefoldmap
608 path, normed, ignoremissing, exists, self._map.filefoldmap
609 )
609 )
610 return folded
610 return folded
611
611
612 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
612 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
613 normed = util.normcase(path)
613 normed = util.normcase(path)
614 folded = self._map.filefoldmap.get(normed, None)
614 folded = self._map.filefoldmap.get(normed, None)
615 if folded is None:
615 if folded is None:
616 folded = self._map.dirfoldmap.get(normed, None)
616 folded = self._map.dirfoldmap.get(normed, None)
617 if folded is None:
617 if folded is None:
618 if isknown:
618 if isknown:
619 folded = path
619 folded = path
620 else:
620 else:
621 # store discovered result in dirfoldmap so that future
621 # store discovered result in dirfoldmap so that future
622 # normalizefile calls don't start matching directories
622 # normalizefile calls don't start matching directories
623 folded = self._discoverpath(
623 folded = self._discoverpath(
624 path, normed, ignoremissing, exists, self._map.dirfoldmap
624 path, normed, ignoremissing, exists, self._map.dirfoldmap
625 )
625 )
626 return folded
626 return folded
627
627
628 def normalize(self, path, isknown=False, ignoremissing=False):
628 def normalize(self, path, isknown=False, ignoremissing=False):
629 """
629 """
630 normalize the case of a pathname when on a casefolding filesystem
630 normalize the case of a pathname when on a casefolding filesystem
631
631
632 isknown specifies whether the filename came from walking the
632 isknown specifies whether the filename came from walking the
633 disk, to avoid extra filesystem access.
633 disk, to avoid extra filesystem access.
634
634
635 If ignoremissing is True, missing path are returned
635 If ignoremissing is True, missing path are returned
636 unchanged. Otherwise, we try harder to normalize possibly
636 unchanged. Otherwise, we try harder to normalize possibly
637 existing path components.
637 existing path components.
638
638
639 The normalized case is determined based on the following precedence:
639 The normalized case is determined based on the following precedence:
640
640
641 - version of name already stored in the dirstate
641 - version of name already stored in the dirstate
642 - version of name stored on disk
642 - version of name stored on disk
643 - version provided via command arguments
643 - version provided via command arguments
644 """
644 """
645
645
646 if self._checkcase:
646 if self._checkcase:
647 return self._normalize(path, isknown, ignoremissing)
647 return self._normalize(path, isknown, ignoremissing)
648 return path
648 return path
649
649
650 def clear(self):
650 def clear(self):
651 self._map.clear()
651 self._map.clear()
652 self._lastnormaltime = 0
652 self._lastnormaltime = 0
653 self._updatedfiles.clear()
653 self._updatedfiles.clear()
654 self._dirty = True
654 self._dirty = True
655
655
656 def rebuild(self, parent, allfiles, changedfiles=None):
656 def rebuild(self, parent, allfiles, changedfiles=None):
657 if changedfiles is None:
657 if changedfiles is None:
658 # Rebuild entire dirstate
658 # Rebuild entire dirstate
659 to_lookup = allfiles
659 to_lookup = allfiles
660 to_drop = []
660 to_drop = []
661 lastnormaltime = self._lastnormaltime
661 lastnormaltime = self._lastnormaltime
662 self.clear()
662 self.clear()
663 self._lastnormaltime = lastnormaltime
663 self._lastnormaltime = lastnormaltime
664 elif len(changedfiles) < 10:
664 elif len(changedfiles) < 10:
665 # Avoid turning allfiles into a set, which can be expensive if it's
665 # Avoid turning allfiles into a set, which can be expensive if it's
666 # large.
666 # large.
667 to_lookup = []
667 to_lookup = []
668 to_drop = []
668 to_drop = []
669 for f in changedfiles:
669 for f in changedfiles:
670 if f in allfiles:
670 if f in allfiles:
671 to_lookup.append(f)
671 to_lookup.append(f)
672 else:
672 else:
673 to_drop.append(f)
673 to_drop.append(f)
674 else:
674 else:
675 changedfilesset = set(changedfiles)
675 changedfilesset = set(changedfiles)
676 to_lookup = changedfilesset & set(allfiles)
676 to_lookup = changedfilesset & set(allfiles)
677 to_drop = changedfilesset - to_lookup
677 to_drop = changedfilesset - to_lookup
678
678
679 if self._origpl is None:
679 if self._origpl is None:
680 self._origpl = self._pl
680 self._origpl = self._pl
681 self._map.setparents(parent, self._nodeconstants.nullid)
681 self._map.setparents(parent, self._nodeconstants.nullid)
682
682
683 for f in to_lookup:
683 for f in to_lookup:
684 self.normallookup(f)
684 self.normallookup(f)
685 for f in to_drop:
685 for f in to_drop:
686 self.drop(f)
686 self.drop(f)
687
687
688 self._dirty = True
688 self._dirty = True
689
689
690 def identity(self):
690 def identity(self):
691 """Return identity of dirstate itself to detect changing in storage
691 """Return identity of dirstate itself to detect changing in storage
692
692
693 If identity of previous dirstate is equal to this, writing
693 If identity of previous dirstate is equal to this, writing
694 changes based on the former dirstate out can keep consistency.
694 changes based on the former dirstate out can keep consistency.
695 """
695 """
696 return self._map.identity
696 return self._map.identity
697
697
698 def write(self, tr):
698 def write(self, tr):
699 if not self._dirty:
699 if not self._dirty:
700 return
700 return
701
701
702 filename = self._filename
702 filename = self._filename
703 if tr:
703 if tr:
704 # 'dirstate.write()' is not only for writing in-memory
704 # 'dirstate.write()' is not only for writing in-memory
705 # changes out, but also for dropping ambiguous timestamp.
705 # changes out, but also for dropping ambiguous timestamp.
706 # delayed writing re-raise "ambiguous timestamp issue".
706 # delayed writing re-raise "ambiguous timestamp issue".
707 # See also the wiki page below for detail:
707 # See also the wiki page below for detail:
708 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
708 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
709
709
710 # emulate dropping timestamp in 'parsers.pack_dirstate'
710 # emulate dropping timestamp in 'parsers.pack_dirstate'
711 now = _getfsnow(self._opener)
711 now = _getfsnow(self._opener)
712 self._map.clearambiguoustimes(self._updatedfiles, now)
712 self._map.clearambiguoustimes(self._updatedfiles, now)
713
713
714 # emulate that all 'dirstate.normal' results are written out
714 # emulate that all 'dirstate.normal' results are written out
715 self._lastnormaltime = 0
715 self._lastnormaltime = 0
716 self._updatedfiles.clear()
716 self._updatedfiles.clear()
717
717
718 # delay writing in-memory changes out
718 # delay writing in-memory changes out
719 tr.addfilegenerator(
719 tr.addfilegenerator(
720 b'dirstate',
720 b'dirstate',
721 (self._filename,),
721 (self._filename,),
722 self._writedirstate,
722 self._writedirstate,
723 location=b'plain',
723 location=b'plain',
724 )
724 )
725 return
725 return
726
726
727 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
727 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
728 self._writedirstate(st)
728 self._writedirstate(st)
729
729
730 def addparentchangecallback(self, category, callback):
730 def addparentchangecallback(self, category, callback):
731 """add a callback to be called when the wd parents are changed
731 """add a callback to be called when the wd parents are changed
732
732
733 Callback will be called with the following arguments:
733 Callback will be called with the following arguments:
734 dirstate, (oldp1, oldp2), (newp1, newp2)
734 dirstate, (oldp1, oldp2), (newp1, newp2)
735
735
736 Category is a unique identifier to allow overwriting an old callback
736 Category is a unique identifier to allow overwriting an old callback
737 with a newer callback.
737 with a newer callback.
738 """
738 """
739 self._plchangecallbacks[category] = callback
739 self._plchangecallbacks[category] = callback
740
740
741 def _writedirstate(self, st):
741 def _writedirstate(self, st):
742 # notify callbacks about parents change
742 # notify callbacks about parents change
743 if self._origpl is not None and self._origpl != self._pl:
743 if self._origpl is not None and self._origpl != self._pl:
744 for c, callback in sorted(
744 for c, callback in sorted(
745 pycompat.iteritems(self._plchangecallbacks)
745 pycompat.iteritems(self._plchangecallbacks)
746 ):
746 ):
747 callback(self, self._origpl, self._pl)
747 callback(self, self._origpl, self._pl)
748 self._origpl = None
748 self._origpl = None
749 # use the modification time of the newly created temporary file as the
749 # use the modification time of the newly created temporary file as the
750 # filesystem's notion of 'now'
750 # filesystem's notion of 'now'
751 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
751 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
752
752
753 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
753 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
754 # timestamp of each entries in dirstate, because of 'now > mtime'
754 # timestamp of each entries in dirstate, because of 'now > mtime'
755 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
755 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
756 if delaywrite > 0:
756 if delaywrite > 0:
757 # do we have any files to delay for?
757 # do we have any files to delay for?
758 for f, e in pycompat.iteritems(self._map):
758 for f, e in pycompat.iteritems(self._map):
759 if e.state == b'n' and e[3] == now:
759 if e.state == b'n' and e[3] == now:
760 import time # to avoid useless import
760 import time # to avoid useless import
761
761
762 # rather than sleep n seconds, sleep until the next
762 # rather than sleep n seconds, sleep until the next
763 # multiple of n seconds
763 # multiple of n seconds
764 clock = time.time()
764 clock = time.time()
765 start = int(clock) - (int(clock) % delaywrite)
765 start = int(clock) - (int(clock) % delaywrite)
766 end = start + delaywrite
766 end = start + delaywrite
767 time.sleep(end - clock)
767 time.sleep(end - clock)
768 now = end # trust our estimate that the end is near now
768 now = end # trust our estimate that the end is near now
769 break
769 break
770
770
771 self._map.write(st, now)
771 self._map.write(st, now)
772 self._lastnormaltime = 0
772 self._lastnormaltime = 0
773 self._dirty = False
773 self._dirty = False
774
774
775 def _dirignore(self, f):
775 def _dirignore(self, f):
776 if self._ignore(f):
776 if self._ignore(f):
777 return True
777 return True
778 for p in pathutil.finddirs(f):
778 for p in pathutil.finddirs(f):
779 if self._ignore(p):
779 if self._ignore(p):
780 return True
780 return True
781 return False
781 return False
782
782
783 def _ignorefiles(self):
783 def _ignorefiles(self):
784 files = []
784 files = []
785 if os.path.exists(self._join(b'.hgignore')):
785 if os.path.exists(self._join(b'.hgignore')):
786 files.append(self._join(b'.hgignore'))
786 files.append(self._join(b'.hgignore'))
787 for name, path in self._ui.configitems(b"ui"):
787 for name, path in self._ui.configitems(b"ui"):
788 if name == b'ignore' or name.startswith(b'ignore.'):
788 if name == b'ignore' or name.startswith(b'ignore.'):
789 # we need to use os.path.join here rather than self._join
789 # we need to use os.path.join here rather than self._join
790 # because path is arbitrary and user-specified
790 # because path is arbitrary and user-specified
791 files.append(os.path.join(self._rootdir, util.expandpath(path)))
791 files.append(os.path.join(self._rootdir, util.expandpath(path)))
792 return files
792 return files
793
793
794 def _ignorefileandline(self, f):
794 def _ignorefileandline(self, f):
795 files = collections.deque(self._ignorefiles())
795 files = collections.deque(self._ignorefiles())
796 visited = set()
796 visited = set()
797 while files:
797 while files:
798 i = files.popleft()
798 i = files.popleft()
799 patterns = matchmod.readpatternfile(
799 patterns = matchmod.readpatternfile(
800 i, self._ui.warn, sourceinfo=True
800 i, self._ui.warn, sourceinfo=True
801 )
801 )
802 for pattern, lineno, line in patterns:
802 for pattern, lineno, line in patterns:
803 kind, p = matchmod._patsplit(pattern, b'glob')
803 kind, p = matchmod._patsplit(pattern, b'glob')
804 if kind == b"subinclude":
804 if kind == b"subinclude":
805 if p not in visited:
805 if p not in visited:
806 files.append(p)
806 files.append(p)
807 continue
807 continue
808 m = matchmod.match(
808 m = matchmod.match(
809 self._root, b'', [], [pattern], warn=self._ui.warn
809 self._root, b'', [], [pattern], warn=self._ui.warn
810 )
810 )
811 if m(f):
811 if m(f):
812 return (i, lineno, line)
812 return (i, lineno, line)
813 visited.add(i)
813 visited.add(i)
814 return (None, -1, b"")
814 return (None, -1, b"")
815
815
816 def _walkexplicit(self, match, subrepos):
816 def _walkexplicit(self, match, subrepos):
817 """Get stat data about the files explicitly specified by match.
817 """Get stat data about the files explicitly specified by match.
818
818
819 Return a triple (results, dirsfound, dirsnotfound).
819 Return a triple (results, dirsfound, dirsnotfound).
820 - results is a mapping from filename to stat result. It also contains
820 - results is a mapping from filename to stat result. It also contains
821 listings mapping subrepos and .hg to None.
821 listings mapping subrepos and .hg to None.
822 - dirsfound is a list of files found to be directories.
822 - dirsfound is a list of files found to be directories.
823 - dirsnotfound is a list of files that the dirstate thinks are
823 - dirsnotfound is a list of files that the dirstate thinks are
824 directories and that were not found."""
824 directories and that were not found."""
825
825
826 def badtype(mode):
826 def badtype(mode):
827 kind = _(b'unknown')
827 kind = _(b'unknown')
828 if stat.S_ISCHR(mode):
828 if stat.S_ISCHR(mode):
829 kind = _(b'character device')
829 kind = _(b'character device')
830 elif stat.S_ISBLK(mode):
830 elif stat.S_ISBLK(mode):
831 kind = _(b'block device')
831 kind = _(b'block device')
832 elif stat.S_ISFIFO(mode):
832 elif stat.S_ISFIFO(mode):
833 kind = _(b'fifo')
833 kind = _(b'fifo')
834 elif stat.S_ISSOCK(mode):
834 elif stat.S_ISSOCK(mode):
835 kind = _(b'socket')
835 kind = _(b'socket')
836 elif stat.S_ISDIR(mode):
836 elif stat.S_ISDIR(mode):
837 kind = _(b'directory')
837 kind = _(b'directory')
838 return _(b'unsupported file type (type is %s)') % kind
838 return _(b'unsupported file type (type is %s)') % kind
839
839
840 badfn = match.bad
840 badfn = match.bad
841 dmap = self._map
841 dmap = self._map
842 lstat = os.lstat
842 lstat = os.lstat
843 getkind = stat.S_IFMT
843 getkind = stat.S_IFMT
844 dirkind = stat.S_IFDIR
844 dirkind = stat.S_IFDIR
845 regkind = stat.S_IFREG
845 regkind = stat.S_IFREG
846 lnkkind = stat.S_IFLNK
846 lnkkind = stat.S_IFLNK
847 join = self._join
847 join = self._join
848 dirsfound = []
848 dirsfound = []
849 foundadd = dirsfound.append
849 foundadd = dirsfound.append
850 dirsnotfound = []
850 dirsnotfound = []
851 notfoundadd = dirsnotfound.append
851 notfoundadd = dirsnotfound.append
852
852
853 if not match.isexact() and self._checkcase:
853 if not match.isexact() and self._checkcase:
854 normalize = self._normalize
854 normalize = self._normalize
855 else:
855 else:
856 normalize = None
856 normalize = None
857
857
858 files = sorted(match.files())
858 files = sorted(match.files())
859 subrepos.sort()
859 subrepos.sort()
860 i, j = 0, 0
860 i, j = 0, 0
861 while i < len(files) and j < len(subrepos):
861 while i < len(files) and j < len(subrepos):
862 subpath = subrepos[j] + b"/"
862 subpath = subrepos[j] + b"/"
863 if files[i] < subpath:
863 if files[i] < subpath:
864 i += 1
864 i += 1
865 continue
865 continue
866 while i < len(files) and files[i].startswith(subpath):
866 while i < len(files) and files[i].startswith(subpath):
867 del files[i]
867 del files[i]
868 j += 1
868 j += 1
869
869
870 if not files or b'' in files:
870 if not files or b'' in files:
871 files = [b'']
871 files = [b'']
872 # constructing the foldmap is expensive, so don't do it for the
872 # constructing the foldmap is expensive, so don't do it for the
873 # common case where files is ['']
873 # common case where files is ['']
874 normalize = None
874 normalize = None
875 results = dict.fromkeys(subrepos)
875 results = dict.fromkeys(subrepos)
876 results[b'.hg'] = None
876 results[b'.hg'] = None
877
877
878 for ff in files:
878 for ff in files:
879 if normalize:
879 if normalize:
880 nf = normalize(ff, False, True)
880 nf = normalize(ff, False, True)
881 else:
881 else:
882 nf = ff
882 nf = ff
883 if nf in results:
883 if nf in results:
884 continue
884 continue
885
885
886 try:
886 try:
887 st = lstat(join(nf))
887 st = lstat(join(nf))
888 kind = getkind(st.st_mode)
888 kind = getkind(st.st_mode)
889 if kind == dirkind:
889 if kind == dirkind:
890 if nf in dmap:
890 if nf in dmap:
891 # file replaced by dir on disk but still in dirstate
891 # file replaced by dir on disk but still in dirstate
892 results[nf] = None
892 results[nf] = None
893 foundadd((nf, ff))
893 foundadd((nf, ff))
894 elif kind == regkind or kind == lnkkind:
894 elif kind == regkind or kind == lnkkind:
895 results[nf] = st
895 results[nf] = st
896 else:
896 else:
897 badfn(ff, badtype(kind))
897 badfn(ff, badtype(kind))
898 if nf in dmap:
898 if nf in dmap:
899 results[nf] = None
899 results[nf] = None
900 except OSError as inst: # nf not found on disk - it is dirstate only
900 except OSError as inst: # nf not found on disk - it is dirstate only
901 if nf in dmap: # does it exactly match a missing file?
901 if nf in dmap: # does it exactly match a missing file?
902 results[nf] = None
902 results[nf] = None
903 else: # does it match a missing directory?
903 else: # does it match a missing directory?
904 if self._map.hasdir(nf):
904 if self._map.hasdir(nf):
905 notfoundadd(nf)
905 notfoundadd(nf)
906 else:
906 else:
907 badfn(ff, encoding.strtolocal(inst.strerror))
907 badfn(ff, encoding.strtolocal(inst.strerror))
908
908
909 # match.files() may contain explicitly-specified paths that shouldn't
909 # match.files() may contain explicitly-specified paths that shouldn't
910 # be taken; drop them from the list of files found. dirsfound/notfound
910 # be taken; drop them from the list of files found. dirsfound/notfound
911 # aren't filtered here because they will be tested later.
911 # aren't filtered here because they will be tested later.
912 if match.anypats():
912 if match.anypats():
913 for f in list(results):
913 for f in list(results):
914 if f == b'.hg' or f in subrepos:
914 if f == b'.hg' or f in subrepos:
915 # keep sentinel to disable further out-of-repo walks
915 # keep sentinel to disable further out-of-repo walks
916 continue
916 continue
917 if not match(f):
917 if not match(f):
918 del results[f]
918 del results[f]
919
919
920 # Case insensitive filesystems cannot rely on lstat() failing to detect
920 # Case insensitive filesystems cannot rely on lstat() failing to detect
921 # a case-only rename. Prune the stat object for any file that does not
921 # a case-only rename. Prune the stat object for any file that does not
922 # match the case in the filesystem, if there are multiple files that
922 # match the case in the filesystem, if there are multiple files that
923 # normalize to the same path.
923 # normalize to the same path.
924 if match.isexact() and self._checkcase:
924 if match.isexact() and self._checkcase:
925 normed = {}
925 normed = {}
926
926
927 for f, st in pycompat.iteritems(results):
927 for f, st in pycompat.iteritems(results):
928 if st is None:
928 if st is None:
929 continue
929 continue
930
930
931 nc = util.normcase(f)
931 nc = util.normcase(f)
932 paths = normed.get(nc)
932 paths = normed.get(nc)
933
933
934 if paths is None:
934 if paths is None:
935 paths = set()
935 paths = set()
936 normed[nc] = paths
936 normed[nc] = paths
937
937
938 paths.add(f)
938 paths.add(f)
939
939
940 for norm, paths in pycompat.iteritems(normed):
940 for norm, paths in pycompat.iteritems(normed):
941 if len(paths) > 1:
941 if len(paths) > 1:
942 for path in paths:
942 for path in paths:
943 folded = self._discoverpath(
943 folded = self._discoverpath(
944 path, norm, True, None, self._map.dirfoldmap
944 path, norm, True, None, self._map.dirfoldmap
945 )
945 )
946 if path != folded:
946 if path != folded:
947 results[path] = None
947 results[path] = None
948
948
949 return results, dirsfound, dirsnotfound
949 return results, dirsfound, dirsnotfound
950
950
951 def walk(self, match, subrepos, unknown, ignored, full=True):
951 def walk(self, match, subrepos, unknown, ignored, full=True):
952 """
952 """
953 Walk recursively through the directory tree, finding all files
953 Walk recursively through the directory tree, finding all files
954 matched by match.
954 matched by match.
955
955
956 If full is False, maybe skip some known-clean files.
956 If full is False, maybe skip some known-clean files.
957
957
958 Return a dict mapping filename to stat-like object (either
958 Return a dict mapping filename to stat-like object (either
959 mercurial.osutil.stat instance or return value of os.stat()).
959 mercurial.osutil.stat instance or return value of os.stat()).
960
960
961 """
961 """
962 # full is a flag that extensions that hook into walk can use -- this
962 # full is a flag that extensions that hook into walk can use -- this
963 # implementation doesn't use it at all. This satisfies the contract
963 # implementation doesn't use it at all. This satisfies the contract
964 # because we only guarantee a "maybe".
964 # because we only guarantee a "maybe".
965
965
966 if ignored:
966 if ignored:
967 ignore = util.never
967 ignore = util.never
968 dirignore = util.never
968 dirignore = util.never
969 elif unknown:
969 elif unknown:
970 ignore = self._ignore
970 ignore = self._ignore
971 dirignore = self._dirignore
971 dirignore = self._dirignore
972 else:
972 else:
973 # if not unknown and not ignored, drop dir recursion and step 2
973 # if not unknown and not ignored, drop dir recursion and step 2
974 ignore = util.always
974 ignore = util.always
975 dirignore = util.always
975 dirignore = util.always
976
976
977 matchfn = match.matchfn
977 matchfn = match.matchfn
978 matchalways = match.always()
978 matchalways = match.always()
979 matchtdir = match.traversedir
979 matchtdir = match.traversedir
980 dmap = self._map
980 dmap = self._map
981 listdir = util.listdir
981 listdir = util.listdir
982 lstat = os.lstat
982 lstat = os.lstat
983 dirkind = stat.S_IFDIR
983 dirkind = stat.S_IFDIR
984 regkind = stat.S_IFREG
984 regkind = stat.S_IFREG
985 lnkkind = stat.S_IFLNK
985 lnkkind = stat.S_IFLNK
986 join = self._join
986 join = self._join
987
987
988 exact = skipstep3 = False
988 exact = skipstep3 = False
989 if match.isexact(): # match.exact
989 if match.isexact(): # match.exact
990 exact = True
990 exact = True
991 dirignore = util.always # skip step 2
991 dirignore = util.always # skip step 2
992 elif match.prefix(): # match.match, no patterns
992 elif match.prefix(): # match.match, no patterns
993 skipstep3 = True
993 skipstep3 = True
994
994
995 if not exact and self._checkcase:
995 if not exact and self._checkcase:
996 normalize = self._normalize
996 normalize = self._normalize
997 normalizefile = self._normalizefile
997 normalizefile = self._normalizefile
998 skipstep3 = False
998 skipstep3 = False
999 else:
999 else:
1000 normalize = self._normalize
1000 normalize = self._normalize
1001 normalizefile = None
1001 normalizefile = None
1002
1002
1003 # step 1: find all explicit files
1003 # step 1: find all explicit files
1004 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1004 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1005 if matchtdir:
1005 if matchtdir:
1006 for d in work:
1006 for d in work:
1007 matchtdir(d[0])
1007 matchtdir(d[0])
1008 for d in dirsnotfound:
1008 for d in dirsnotfound:
1009 matchtdir(d)
1009 matchtdir(d)
1010
1010
1011 skipstep3 = skipstep3 and not (work or dirsnotfound)
1011 skipstep3 = skipstep3 and not (work or dirsnotfound)
1012 work = [d for d in work if not dirignore(d[0])]
1012 work = [d for d in work if not dirignore(d[0])]
1013
1013
1014 # step 2: visit subdirectories
1014 # step 2: visit subdirectories
1015 def traverse(work, alreadynormed):
1015 def traverse(work, alreadynormed):
1016 wadd = work.append
1016 wadd = work.append
1017 while work:
1017 while work:
1018 tracing.counter('dirstate.walk work', len(work))
1018 tracing.counter('dirstate.walk work', len(work))
1019 nd = work.pop()
1019 nd = work.pop()
1020 visitentries = match.visitchildrenset(nd)
1020 visitentries = match.visitchildrenset(nd)
1021 if not visitentries:
1021 if not visitentries:
1022 continue
1022 continue
1023 if visitentries == b'this' or visitentries == b'all':
1023 if visitentries == b'this' or visitentries == b'all':
1024 visitentries = None
1024 visitentries = None
1025 skip = None
1025 skip = None
1026 if nd != b'':
1026 if nd != b'':
1027 skip = b'.hg'
1027 skip = b'.hg'
1028 try:
1028 try:
1029 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1029 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1030 entries = listdir(join(nd), stat=True, skip=skip)
1030 entries = listdir(join(nd), stat=True, skip=skip)
1031 except OSError as inst:
1031 except OSError as inst:
1032 if inst.errno in (errno.EACCES, errno.ENOENT):
1032 if inst.errno in (errno.EACCES, errno.ENOENT):
1033 match.bad(
1033 match.bad(
1034 self.pathto(nd), encoding.strtolocal(inst.strerror)
1034 self.pathto(nd), encoding.strtolocal(inst.strerror)
1035 )
1035 )
1036 continue
1036 continue
1037 raise
1037 raise
1038 for f, kind, st in entries:
1038 for f, kind, st in entries:
1039 # Some matchers may return files in the visitentries set,
1039 # Some matchers may return files in the visitentries set,
1040 # instead of 'this', if the matcher explicitly mentions them
1040 # instead of 'this', if the matcher explicitly mentions them
1041 # and is not an exactmatcher. This is acceptable; we do not
1041 # and is not an exactmatcher. This is acceptable; we do not
1042 # make any hard assumptions about file-or-directory below
1042 # make any hard assumptions about file-or-directory below
1043 # based on the presence of `f` in visitentries. If
1043 # based on the presence of `f` in visitentries. If
1044 # visitchildrenset returned a set, we can always skip the
1044 # visitchildrenset returned a set, we can always skip the
1045 # entries *not* in the set it provided regardless of whether
1045 # entries *not* in the set it provided regardless of whether
1046 # they're actually a file or a directory.
1046 # they're actually a file or a directory.
1047 if visitentries and f not in visitentries:
1047 if visitentries and f not in visitentries:
1048 continue
1048 continue
1049 if normalizefile:
1049 if normalizefile:
1050 # even though f might be a directory, we're only
1050 # even though f might be a directory, we're only
1051 # interested in comparing it to files currently in the
1051 # interested in comparing it to files currently in the
1052 # dmap -- therefore normalizefile is enough
1052 # dmap -- therefore normalizefile is enough
1053 nf = normalizefile(
1053 nf = normalizefile(
1054 nd and (nd + b"/" + f) or f, True, True
1054 nd and (nd + b"/" + f) or f, True, True
1055 )
1055 )
1056 else:
1056 else:
1057 nf = nd and (nd + b"/" + f) or f
1057 nf = nd and (nd + b"/" + f) or f
1058 if nf not in results:
1058 if nf not in results:
1059 if kind == dirkind:
1059 if kind == dirkind:
1060 if not ignore(nf):
1060 if not ignore(nf):
1061 if matchtdir:
1061 if matchtdir:
1062 matchtdir(nf)
1062 matchtdir(nf)
1063 wadd(nf)
1063 wadd(nf)
1064 if nf in dmap and (matchalways or matchfn(nf)):
1064 if nf in dmap and (matchalways or matchfn(nf)):
1065 results[nf] = None
1065 results[nf] = None
1066 elif kind == regkind or kind == lnkkind:
1066 elif kind == regkind or kind == lnkkind:
1067 if nf in dmap:
1067 if nf in dmap:
1068 if matchalways or matchfn(nf):
1068 if matchalways or matchfn(nf):
1069 results[nf] = st
1069 results[nf] = st
1070 elif (matchalways or matchfn(nf)) and not ignore(
1070 elif (matchalways or matchfn(nf)) and not ignore(
1071 nf
1071 nf
1072 ):
1072 ):
1073 # unknown file -- normalize if necessary
1073 # unknown file -- normalize if necessary
1074 if not alreadynormed:
1074 if not alreadynormed:
1075 nf = normalize(nf, False, True)
1075 nf = normalize(nf, False, True)
1076 results[nf] = st
1076 results[nf] = st
1077 elif nf in dmap and (matchalways or matchfn(nf)):
1077 elif nf in dmap and (matchalways or matchfn(nf)):
1078 results[nf] = None
1078 results[nf] = None
1079
1079
1080 for nd, d in work:
1080 for nd, d in work:
1081 # alreadynormed means that processwork doesn't have to do any
1081 # alreadynormed means that processwork doesn't have to do any
1082 # expensive directory normalization
1082 # expensive directory normalization
1083 alreadynormed = not normalize or nd == d
1083 alreadynormed = not normalize or nd == d
1084 traverse([d], alreadynormed)
1084 traverse([d], alreadynormed)
1085
1085
1086 for s in subrepos:
1086 for s in subrepos:
1087 del results[s]
1087 del results[s]
1088 del results[b'.hg']
1088 del results[b'.hg']
1089
1089
1090 # step 3: visit remaining files from dmap
1090 # step 3: visit remaining files from dmap
1091 if not skipstep3 and not exact:
1091 if not skipstep3 and not exact:
1092 # If a dmap file is not in results yet, it was either
1092 # If a dmap file is not in results yet, it was either
1093 # a) not matching matchfn b) ignored, c) missing, or d) under a
1093 # a) not matching matchfn b) ignored, c) missing, or d) under a
1094 # symlink directory.
1094 # symlink directory.
1095 if not results and matchalways:
1095 if not results and matchalways:
1096 visit = [f for f in dmap]
1096 visit = [f for f in dmap]
1097 else:
1097 else:
1098 visit = [f for f in dmap if f not in results and matchfn(f)]
1098 visit = [f for f in dmap if f not in results and matchfn(f)]
1099 visit.sort()
1099 visit.sort()
1100
1100
1101 if unknown:
1101 if unknown:
1102 # unknown == True means we walked all dirs under the roots
1102 # unknown == True means we walked all dirs under the roots
1103 # that wasn't ignored, and everything that matched was stat'ed
1103 # that wasn't ignored, and everything that matched was stat'ed
1104 # and is already in results.
1104 # and is already in results.
1105 # The rest must thus be ignored or under a symlink.
1105 # The rest must thus be ignored or under a symlink.
1106 audit_path = pathutil.pathauditor(self._root, cached=True)
1106 audit_path = pathutil.pathauditor(self._root, cached=True)
1107
1107
1108 for nf in iter(visit):
1108 for nf in iter(visit):
1109 # If a stat for the same file was already added with a
1109 # If a stat for the same file was already added with a
1110 # different case, don't add one for this, since that would
1110 # different case, don't add one for this, since that would
1111 # make it appear as if the file exists under both names
1111 # make it appear as if the file exists under both names
1112 # on disk.
1112 # on disk.
1113 if (
1113 if (
1114 normalizefile
1114 normalizefile
1115 and normalizefile(nf, True, True) in results
1115 and normalizefile(nf, True, True) in results
1116 ):
1116 ):
1117 results[nf] = None
1117 results[nf] = None
1118 # Report ignored items in the dmap as long as they are not
1118 # Report ignored items in the dmap as long as they are not
1119 # under a symlink directory.
1119 # under a symlink directory.
1120 elif audit_path.check(nf):
1120 elif audit_path.check(nf):
1121 try:
1121 try:
1122 results[nf] = lstat(join(nf))
1122 results[nf] = lstat(join(nf))
1123 # file was just ignored, no links, and exists
1123 # file was just ignored, no links, and exists
1124 except OSError:
1124 except OSError:
1125 # file doesn't exist
1125 # file doesn't exist
1126 results[nf] = None
1126 results[nf] = None
1127 else:
1127 else:
1128 # It's either missing or under a symlink directory
1128 # It's either missing or under a symlink directory
1129 # which we in this case report as missing
1129 # which we in this case report as missing
1130 results[nf] = None
1130 results[nf] = None
1131 else:
1131 else:
1132 # We may not have walked the full directory tree above,
1132 # We may not have walked the full directory tree above,
1133 # so stat and check everything we missed.
1133 # so stat and check everything we missed.
1134 iv = iter(visit)
1134 iv = iter(visit)
1135 for st in util.statfiles([join(i) for i in visit]):
1135 for st in util.statfiles([join(i) for i in visit]):
1136 results[next(iv)] = st
1136 results[next(iv)] = st
1137 return results
1137 return results
1138
1138
1139 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1139 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1140 # Force Rayon (Rust parallelism library) to respect the number of
1140 # Force Rayon (Rust parallelism library) to respect the number of
1141 # workers. This is a temporary workaround until Rust code knows
1141 # workers. This is a temporary workaround until Rust code knows
1142 # how to read the config file.
1142 # how to read the config file.
1143 numcpus = self._ui.configint(b"worker", b"numcpus")
1143 numcpus = self._ui.configint(b"worker", b"numcpus")
1144 if numcpus is not None:
1144 if numcpus is not None:
1145 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1145 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1146
1146
1147 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1147 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1148 if not workers_enabled:
1148 if not workers_enabled:
1149 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1149 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1150
1150
1151 (
1151 (
1152 lookup,
1152 lookup,
1153 modified,
1153 modified,
1154 added,
1154 added,
1155 removed,
1155 removed,
1156 deleted,
1156 deleted,
1157 clean,
1157 clean,
1158 ignored,
1158 ignored,
1159 unknown,
1159 unknown,
1160 warnings,
1160 warnings,
1161 bad,
1161 bad,
1162 traversed,
1162 traversed,
1163 dirty,
1163 dirty,
1164 ) = rustmod.status(
1164 ) = rustmod.status(
1165 self._map._rustmap,
1165 self._map._rustmap,
1166 matcher,
1166 matcher,
1167 self._rootdir,
1167 self._rootdir,
1168 self._ignorefiles(),
1168 self._ignorefiles(),
1169 self._checkexec,
1169 self._checkexec,
1170 self._lastnormaltime,
1170 self._lastnormaltime,
1171 bool(list_clean),
1171 bool(list_clean),
1172 bool(list_ignored),
1172 bool(list_ignored),
1173 bool(list_unknown),
1173 bool(list_unknown),
1174 bool(matcher.traversedir),
1174 bool(matcher.traversedir),
1175 )
1175 )
1176
1176
1177 self._dirty |= dirty
1177 self._dirty |= dirty
1178
1178
1179 if matcher.traversedir:
1179 if matcher.traversedir:
1180 for dir in traversed:
1180 for dir in traversed:
1181 matcher.traversedir(dir)
1181 matcher.traversedir(dir)
1182
1182
1183 if self._ui.warn:
1183 if self._ui.warn:
1184 for item in warnings:
1184 for item in warnings:
1185 if isinstance(item, tuple):
1185 if isinstance(item, tuple):
1186 file_path, syntax = item
1186 file_path, syntax = item
1187 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1187 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1188 file_path,
1188 file_path,
1189 syntax,
1189 syntax,
1190 )
1190 )
1191 self._ui.warn(msg)
1191 self._ui.warn(msg)
1192 else:
1192 else:
1193 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1193 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1194 self._ui.warn(
1194 self._ui.warn(
1195 msg
1195 msg
1196 % (
1196 % (
1197 pathutil.canonpath(
1197 pathutil.canonpath(
1198 self._rootdir, self._rootdir, item
1198 self._rootdir, self._rootdir, item
1199 ),
1199 ),
1200 b"No such file or directory",
1200 b"No such file or directory",
1201 )
1201 )
1202 )
1202 )
1203
1203
1204 for (fn, message) in bad:
1204 for (fn, message) in bad:
1205 matcher.bad(fn, encoding.strtolocal(message))
1205 matcher.bad(fn, encoding.strtolocal(message))
1206
1206
1207 status = scmutil.status(
1207 status = scmutil.status(
1208 modified=modified,
1208 modified=modified,
1209 added=added,
1209 added=added,
1210 removed=removed,
1210 removed=removed,
1211 deleted=deleted,
1211 deleted=deleted,
1212 unknown=unknown,
1212 unknown=unknown,
1213 ignored=ignored,
1213 ignored=ignored,
1214 clean=clean,
1214 clean=clean,
1215 )
1215 )
1216 return (lookup, status)
1216 return (lookup, status)
1217
1217
1218 def status(self, match, subrepos, ignored, clean, unknown):
1218 def status(self, match, subrepos, ignored, clean, unknown):
1219 """Determine the status of the working copy relative to the
1219 """Determine the status of the working copy relative to the
1220 dirstate and return a pair of (unsure, status), where status is of type
1220 dirstate and return a pair of (unsure, status), where status is of type
1221 scmutil.status and:
1221 scmutil.status and:
1222
1222
1223 unsure:
1223 unsure:
1224 files that might have been modified since the dirstate was
1224 files that might have been modified since the dirstate was
1225 written, but need to be read to be sure (size is the same
1225 written, but need to be read to be sure (size is the same
1226 but mtime differs)
1226 but mtime differs)
1227 status.modified:
1227 status.modified:
1228 files that have definitely been modified since the dirstate
1228 files that have definitely been modified since the dirstate
1229 was written (different size or mode)
1229 was written (different size or mode)
1230 status.clean:
1230 status.clean:
1231 files that have definitely not been modified since the
1231 files that have definitely not been modified since the
1232 dirstate was written
1232 dirstate was written
1233 """
1233 """
1234 listignored, listclean, listunknown = ignored, clean, unknown
1234 listignored, listclean, listunknown = ignored, clean, unknown
1235 lookup, modified, added, unknown, ignored = [], [], [], [], []
1235 lookup, modified, added, unknown, ignored = [], [], [], [], []
1236 removed, deleted, clean = [], [], []
1236 removed, deleted, clean = [], [], []
1237
1237
1238 dmap = self._map
1238 dmap = self._map
1239 dmap.preload()
1239 dmap.preload()
1240
1240
1241 use_rust = True
1241 use_rust = True
1242
1242
1243 allowed_matchers = (
1243 allowed_matchers = (
1244 matchmod.alwaysmatcher,
1244 matchmod.alwaysmatcher,
1245 matchmod.exactmatcher,
1245 matchmod.exactmatcher,
1246 matchmod.includematcher,
1246 matchmod.includematcher,
1247 )
1247 )
1248
1248
1249 if rustmod is None:
1249 if rustmod is None:
1250 use_rust = False
1250 use_rust = False
1251 elif self._checkcase:
1251 elif self._checkcase:
1252 # Case-insensitive filesystems are not handled yet
1252 # Case-insensitive filesystems are not handled yet
1253 use_rust = False
1253 use_rust = False
1254 elif subrepos:
1254 elif subrepos:
1255 use_rust = False
1255 use_rust = False
1256 elif sparse.enabled:
1256 elif sparse.enabled:
1257 use_rust = False
1257 use_rust = False
1258 elif not isinstance(match, allowed_matchers):
1258 elif not isinstance(match, allowed_matchers):
1259 # Some matchers have yet to be implemented
1259 # Some matchers have yet to be implemented
1260 use_rust = False
1260 use_rust = False
1261
1261
1262 if use_rust:
1262 if use_rust:
1263 try:
1263 try:
1264 return self._rust_status(
1264 return self._rust_status(
1265 match, listclean, listignored, listunknown
1265 match, listclean, listignored, listunknown
1266 )
1266 )
1267 except rustmod.FallbackError:
1267 except rustmod.FallbackError:
1268 pass
1268 pass
1269
1269
1270 def noop(f):
1270 def noop(f):
1271 pass
1271 pass
1272
1272
1273 dcontains = dmap.__contains__
1273 dcontains = dmap.__contains__
1274 dget = dmap.__getitem__
1274 dget = dmap.__getitem__
1275 ladd = lookup.append # aka "unsure"
1275 ladd = lookup.append # aka "unsure"
1276 madd = modified.append
1276 madd = modified.append
1277 aadd = added.append
1277 aadd = added.append
1278 uadd = unknown.append if listunknown else noop
1278 uadd = unknown.append if listunknown else noop
1279 iadd = ignored.append if listignored else noop
1279 iadd = ignored.append if listignored else noop
1280 radd = removed.append
1280 radd = removed.append
1281 dadd = deleted.append
1281 dadd = deleted.append
1282 cadd = clean.append if listclean else noop
1282 cadd = clean.append if listclean else noop
1283 mexact = match.exact
1283 mexact = match.exact
1284 dirignore = self._dirignore
1284 dirignore = self._dirignore
1285 checkexec = self._checkexec
1285 checkexec = self._checkexec
1286 copymap = self._map.copymap
1286 copymap = self._map.copymap
1287 lastnormaltime = self._lastnormaltime
1287 lastnormaltime = self._lastnormaltime
1288
1288
1289 # We need to do full walks when either
1289 # We need to do full walks when either
1290 # - we're listing all clean files, or
1290 # - we're listing all clean files, or
1291 # - match.traversedir does something, because match.traversedir should
1291 # - match.traversedir does something, because match.traversedir should
1292 # be called for every dir in the working dir
1292 # be called for every dir in the working dir
1293 full = listclean or match.traversedir is not None
1293 full = listclean or match.traversedir is not None
1294 for fn, st in pycompat.iteritems(
1294 for fn, st in pycompat.iteritems(
1295 self.walk(match, subrepos, listunknown, listignored, full=full)
1295 self.walk(match, subrepos, listunknown, listignored, full=full)
1296 ):
1296 ):
1297 if not dcontains(fn):
1297 if not dcontains(fn):
1298 if (listignored or mexact(fn)) and dirignore(fn):
1298 if (listignored or mexact(fn)) and dirignore(fn):
1299 if listignored:
1299 if listignored:
1300 iadd(fn)
1300 iadd(fn)
1301 else:
1301 else:
1302 uadd(fn)
1302 uadd(fn)
1303 continue
1303 continue
1304
1304
1305 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1305 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1306 # written like that for performance reasons. dmap[fn] is not a
1306 # written like that for performance reasons. dmap[fn] is not a
1307 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1307 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1308 # opcode has fast paths when the value to be unpacked is a tuple or
1308 # opcode has fast paths when the value to be unpacked is a tuple or
1309 # a list, but falls back to creating a full-fledged iterator in
1309 # a list, but falls back to creating a full-fledged iterator in
1310 # general. That is much slower than simply accessing and storing the
1310 # general. That is much slower than simply accessing and storing the
1311 # tuple members one by one.
1311 # tuple members one by one.
1312 t = dget(fn)
1312 t = dget(fn)
1313 state = t.state
1313 state = t.state
1314 mode = t[1]
1314 mode = t[1]
1315 size = t[2]
1315 size = t[2]
1316 time = t[3]
1316 time = t[3]
1317
1317
1318 if not st and state in b"nma":
1318 if not st and state in b"nma":
1319 dadd(fn)
1319 dadd(fn)
1320 elif state == b'n':
1320 elif state == b'n':
1321 if (
1321 if (
1322 size >= 0
1322 size >= 0
1323 and (
1323 and (
1324 (size != st.st_size and size != st.st_size & _rangemask)
1324 (size != st.st_size and size != st.st_size & _rangemask)
1325 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1325 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1326 )
1326 )
1327 or t.from_p2
1327 or t.from_p2
1328 or fn in copymap
1328 or fn in copymap
1329 ):
1329 ):
1330 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1330 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1331 # issue6456: Size returned may be longer due to
1331 # issue6456: Size returned may be longer due to
1332 # encryption on EXT-4 fscrypt, undecided.
1332 # encryption on EXT-4 fscrypt, undecided.
1333 ladd(fn)
1333 ladd(fn)
1334 else:
1334 else:
1335 madd(fn)
1335 madd(fn)
1336 elif (
1336 elif (
1337 time != st[stat.ST_MTIME]
1337 time != st[stat.ST_MTIME]
1338 and time != st[stat.ST_MTIME] & _rangemask
1338 and time != st[stat.ST_MTIME] & _rangemask
1339 ):
1339 ):
1340 ladd(fn)
1340 ladd(fn)
1341 elif st[stat.ST_MTIME] == lastnormaltime:
1341 elif st[stat.ST_MTIME] == lastnormaltime:
1342 # fn may have just been marked as normal and it may have
1342 # fn may have just been marked as normal and it may have
1343 # changed in the same second without changing its size.
1343 # changed in the same second without changing its size.
1344 # This can happen if we quickly do multiple commits.
1344 # This can happen if we quickly do multiple commits.
1345 # Force lookup, so we don't miss such a racy file change.
1345 # Force lookup, so we don't miss such a racy file change.
1346 ladd(fn)
1346 ladd(fn)
1347 elif listclean:
1347 elif listclean:
1348 cadd(fn)
1348 cadd(fn)
1349 elif t.merged:
1349 elif t.merged:
1350 madd(fn)
1350 madd(fn)
1351 elif t.added:
1351 elif t.added:
1352 aadd(fn)
1352 aadd(fn)
1353 elif t.removed:
1353 elif t.removed:
1354 radd(fn)
1354 radd(fn)
1355 status = scmutil.status(
1355 status = scmutil.status(
1356 modified, added, removed, deleted, unknown, ignored, clean
1356 modified, added, removed, deleted, unknown, ignored, clean
1357 )
1357 )
1358 return (lookup, status)
1358 return (lookup, status)
1359
1359
1360 def matches(self, match):
1360 def matches(self, match):
1361 """
1361 """
1362 return files in the dirstate (in whatever state) filtered by match
1362 return files in the dirstate (in whatever state) filtered by match
1363 """
1363 """
1364 dmap = self._map
1364 dmap = self._map
1365 if rustmod is not None:
1365 if rustmod is not None:
1366 dmap = self._map._rustmap
1366 dmap = self._map._rustmap
1367
1367
1368 if match.always():
1368 if match.always():
1369 return dmap.keys()
1369 return dmap.keys()
1370 files = match.files()
1370 files = match.files()
1371 if match.isexact():
1371 if match.isexact():
1372 # fast path -- filter the other way around, since typically files is
1372 # fast path -- filter the other way around, since typically files is
1373 # much smaller than dmap
1373 # much smaller than dmap
1374 return [f for f in files if f in dmap]
1374 return [f for f in files if f in dmap]
1375 if match.prefix() and all(fn in dmap for fn in files):
1375 if match.prefix() and all(fn in dmap for fn in files):
1376 # fast path -- all the values are known to be files, so just return
1376 # fast path -- all the values are known to be files, so just return
1377 # that
1377 # that
1378 return list(files)
1378 return list(files)
1379 return [f for f in dmap if match(f)]
1379 return [f for f in dmap if match(f)]
1380
1380
1381 def _actualfilename(self, tr):
1381 def _actualfilename(self, tr):
1382 if tr:
1382 if tr:
1383 return self._pendingfilename
1383 return self._pendingfilename
1384 else:
1384 else:
1385 return self._filename
1385 return self._filename
1386
1386
1387 def savebackup(self, tr, backupname):
1387 def savebackup(self, tr, backupname):
1388 '''Save current dirstate into backup file'''
1388 '''Save current dirstate into backup file'''
1389 filename = self._actualfilename(tr)
1389 filename = self._actualfilename(tr)
1390 assert backupname != filename
1390 assert backupname != filename
1391
1391
1392 # use '_writedirstate' instead of 'write' to write changes certainly,
1392 # use '_writedirstate' instead of 'write' to write changes certainly,
1393 # because the latter omits writing out if transaction is running.
1393 # because the latter omits writing out if transaction is running.
1394 # output file will be used to create backup of dirstate at this point.
1394 # output file will be used to create backup of dirstate at this point.
1395 if self._dirty or not self._opener.exists(filename):
1395 if self._dirty or not self._opener.exists(filename):
1396 self._writedirstate(
1396 self._writedirstate(
1397 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1397 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1398 )
1398 )
1399
1399
1400 if tr:
1400 if tr:
1401 # ensure that subsequent tr.writepending returns True for
1401 # ensure that subsequent tr.writepending returns True for
1402 # changes written out above, even if dirstate is never
1402 # changes written out above, even if dirstate is never
1403 # changed after this
1403 # changed after this
1404 tr.addfilegenerator(
1404 tr.addfilegenerator(
1405 b'dirstate',
1405 b'dirstate',
1406 (self._filename,),
1406 (self._filename,),
1407 self._writedirstate,
1407 self._writedirstate,
1408 location=b'plain',
1408 location=b'plain',
1409 )
1409 )
1410
1410
1411 # ensure that pending file written above is unlinked at
1411 # ensure that pending file written above is unlinked at
1412 # failure, even if tr.writepending isn't invoked until the
1412 # failure, even if tr.writepending isn't invoked until the
1413 # end of this transaction
1413 # end of this transaction
1414 tr.registertmp(filename, location=b'plain')
1414 tr.registertmp(filename, location=b'plain')
1415
1415
1416 self._opener.tryunlink(backupname)
1416 self._opener.tryunlink(backupname)
1417 # hardlink backup is okay because _writedirstate is always called
1417 # hardlink backup is okay because _writedirstate is always called
1418 # with an "atomictemp=True" file.
1418 # with an "atomictemp=True" file.
1419 util.copyfile(
1419 util.copyfile(
1420 self._opener.join(filename),
1420 self._opener.join(filename),
1421 self._opener.join(backupname),
1421 self._opener.join(backupname),
1422 hardlink=True,
1422 hardlink=True,
1423 )
1423 )
1424
1424
1425 def restorebackup(self, tr, backupname):
1425 def restorebackup(self, tr, backupname):
1426 '''Restore dirstate by backup file'''
1426 '''Restore dirstate by backup file'''
1427 # this "invalidate()" prevents "wlock.release()" from writing
1427 # this "invalidate()" prevents "wlock.release()" from writing
1428 # changes of dirstate out after restoring from backup file
1428 # changes of dirstate out after restoring from backup file
1429 self.invalidate()
1429 self.invalidate()
1430 filename = self._actualfilename(tr)
1430 filename = self._actualfilename(tr)
1431 o = self._opener
1431 o = self._opener
1432 if util.samefile(o.join(backupname), o.join(filename)):
1432 if util.samefile(o.join(backupname), o.join(filename)):
1433 o.unlink(backupname)
1433 o.unlink(backupname)
1434 else:
1434 else:
1435 o.rename(backupname, filename, checkambig=True)
1435 o.rename(backupname, filename, checkambig=True)
1436
1436
1437 def clearbackup(self, tr, backupname):
1437 def clearbackup(self, tr, backupname):
1438 '''Clear backup file'''
1438 '''Clear backup file'''
1439 self._opener.unlink(backupname)
1439 self._opener.unlink(backupname)
@@ -1,684 +1,685 b''
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 parsers = policy.importmod('parsers')
21 parsers = policy.importmod('parsers')
22 rustmod = policy.importrust('dirstate')
22 rustmod = policy.importrust('dirstate')
23
23
24 propertycache = util.propertycache
24 propertycache = util.propertycache
25
25
26 dirstatetuple = parsers.dirstatetuple
26 dirstatetuple = parsers.dirstatetuple
27
27
28
28
29 # a special value used internally for `size` if the file come from the other parent
29 # a special value used internally for `size` if the file come from the other parent
30 FROM_P2 = -2
30 FROM_P2 = -2
31
31
32 # a special value used internally for `size` if the file is modified/merged/added
32 # a special value used internally for `size` if the file is modified/merged/added
33 NONNORMAL = -1
33 NONNORMAL = -1
34
34
35 # a special value used internally for `time` if the time is ambigeous
35 # a special value used internally for `time` if the time is ambigeous
36 AMBIGUOUS_TIME = -1
36 AMBIGUOUS_TIME = -1
37
37
38 rangemask = 0x7FFFFFFF
38 rangemask = 0x7FFFFFFF
39
39
40
40
41 class dirstatemap(object):
41 class dirstatemap(object):
42 """Map encapsulating the dirstate's contents.
42 """Map encapsulating the dirstate's contents.
43
43
44 The dirstate contains the following state:
44 The dirstate contains the following state:
45
45
46 - `identity` is the identity of the dirstate file, which can be used to
46 - `identity` is the identity of the dirstate file, which can be used to
47 detect when changes have occurred to the dirstate file.
47 detect when changes have occurred to the dirstate file.
48
48
49 - `parents` is a pair containing the parents of the working copy. The
49 - `parents` is a pair containing the parents of the working copy. The
50 parents are updated by calling `setparents`.
50 parents are updated by calling `setparents`.
51
51
52 - the state map maps filenames to tuples of (state, mode, size, mtime),
52 - the state map maps filenames to tuples of (state, mode, size, mtime),
53 where state is a single character representing 'normal', 'added',
53 where state is a single character representing 'normal', 'added',
54 'removed', or 'merged'. It is read by treating the dirstate as a
54 'removed', or 'merged'. It is read by treating the dirstate as a
55 dict. File state is updated by calling the `addfile`, `removefile` and
55 dict. File state is updated by calling the `addfile`, `removefile` and
56 `dropfile` methods.
56 `dropfile` methods.
57
57
58 - `copymap` maps destination filenames to their source filename.
58 - `copymap` maps destination filenames to their source filename.
59
59
60 The dirstate also provides the following views onto the state:
60 The dirstate also provides the following views onto the state:
61
61
62 - `nonnormalset` is a set of the filenames that have state other
62 - `nonnormalset` is a set of the filenames that have state other
63 than 'normal', or are normal but have an mtime of -1 ('normallookup').
63 than 'normal', or are normal but have an mtime of -1 ('normallookup').
64
64
65 - `otherparentset` is a set of the filenames that are marked as coming
65 - `otherparentset` is a set of the filenames that are marked as coming
66 from the second parent when the dirstate is currently being merged.
66 from the second parent when the dirstate is currently being merged.
67
67
68 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
68 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
69 form that they appear as in the dirstate.
69 form that they appear as in the dirstate.
70
70
71 - `dirfoldmap` is a dict mapping normalized directory names to the
71 - `dirfoldmap` is a dict mapping normalized directory names to the
72 denormalized form that they appear as in the dirstate.
72 denormalized form that they appear as in the dirstate.
73 """
73 """
74
74
75 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
75 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
76 self._ui = ui
76 self._ui = ui
77 self._opener = opener
77 self._opener = opener
78 self._root = root
78 self._root = root
79 self._filename = b'dirstate'
79 self._filename = b'dirstate'
80 self._nodelen = 20
80 self._nodelen = 20
81 self._nodeconstants = nodeconstants
81 self._nodeconstants = nodeconstants
82 assert (
82 assert (
83 not use_dirstate_v2
83 not use_dirstate_v2
84 ), "should have detected unsupported requirement"
84 ), "should have detected unsupported requirement"
85
85
86 self._parents = None
86 self._parents = None
87 self._dirtyparents = False
87 self._dirtyparents = False
88
88
89 # for consistent view between _pl() and _read() invocations
89 # for consistent view between _pl() and _read() invocations
90 self._pendingmode = None
90 self._pendingmode = None
91
91
92 @propertycache
92 @propertycache
93 def _map(self):
93 def _map(self):
94 self._map = {}
94 self._map = {}
95 self.read()
95 self.read()
96 return self._map
96 return self._map
97
97
98 @propertycache
98 @propertycache
99 def copymap(self):
99 def copymap(self):
100 self.copymap = {}
100 self.copymap = {}
101 self._map
101 self._map
102 return self.copymap
102 return self.copymap
103
103
104 def directories(self):
104 def directories(self):
105 # Rust / dirstate-v2 only
105 # Rust / dirstate-v2 only
106 return []
106 return []
107
107
108 def clear(self):
108 def clear(self):
109 self._map.clear()
109 self._map.clear()
110 self.copymap.clear()
110 self.copymap.clear()
111 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
111 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
112 util.clearcachedproperty(self, b"_dirs")
112 util.clearcachedproperty(self, b"_dirs")
113 util.clearcachedproperty(self, b"_alldirs")
113 util.clearcachedproperty(self, b"_alldirs")
114 util.clearcachedproperty(self, b"filefoldmap")
114 util.clearcachedproperty(self, b"filefoldmap")
115 util.clearcachedproperty(self, b"dirfoldmap")
115 util.clearcachedproperty(self, b"dirfoldmap")
116 util.clearcachedproperty(self, b"nonnormalset")
116 util.clearcachedproperty(self, b"nonnormalset")
117 util.clearcachedproperty(self, b"otherparentset")
117 util.clearcachedproperty(self, b"otherparentset")
118
118
119 def items(self):
119 def items(self):
120 return pycompat.iteritems(self._map)
120 return pycompat.iteritems(self._map)
121
121
122 # forward for python2,3 compat
122 # forward for python2,3 compat
123 iteritems = items
123 iteritems = items
124
124
125 def __len__(self):
125 def __len__(self):
126 return len(self._map)
126 return len(self._map)
127
127
128 def __iter__(self):
128 def __iter__(self):
129 return iter(self._map)
129 return iter(self._map)
130
130
131 def get(self, key, default=None):
131 def get(self, key, default=None):
132 return self._map.get(key, default)
132 return self._map.get(key, default)
133
133
134 def __contains__(self, key):
134 def __contains__(self, key):
135 return key in self._map
135 return key in self._map
136
136
137 def __getitem__(self, key):
137 def __getitem__(self, key):
138 return self._map[key]
138 return self._map[key]
139
139
140 def keys(self):
140 def keys(self):
141 return self._map.keys()
141 return self._map.keys()
142
142
143 def preload(self):
143 def preload(self):
144 """Loads the underlying data, if it's not already loaded"""
144 """Loads the underlying data, if it's not already loaded"""
145 self._map
145 self._map
146
146
147 def addfile(
147 def addfile(
148 self,
148 self,
149 f,
149 f,
150 state=None,
150 state=None,
151 mode=0,
151 mode=0,
152 size=None,
152 size=None,
153 mtime=None,
153 mtime=None,
154 added=False,
154 added=False,
155 merged=False,
155 merged=False,
156 from_p2=False,
156 from_p2=False,
157 possibly_dirty=False,
157 possibly_dirty=False,
158 ):
158 ):
159 """Add a tracked file to the dirstate."""
159 """Add a tracked file to the dirstate."""
160 if added:
160 if added:
161 assert not merged
161 assert not merged
162 assert not possibly_dirty
162 assert not possibly_dirty
163 assert not from_p2
163 assert not from_p2
164 state = b'a'
164 state = b'a'
165 size = NONNORMAL
165 size = NONNORMAL
166 mtime = AMBIGUOUS_TIME
166 mtime = AMBIGUOUS_TIME
167 elif merged:
167 elif merged:
168 assert not possibly_dirty
168 assert not possibly_dirty
169 assert not from_p2
169 assert not from_p2
170 state = b'm'
170 state = b'm'
171 size = FROM_P2
171 size = FROM_P2
172 mtime = AMBIGUOUS_TIME
172 mtime = AMBIGUOUS_TIME
173 elif from_p2:
173 elif from_p2:
174 assert not possibly_dirty
174 assert not possibly_dirty
175 state = b'n'
175 size = FROM_P2
176 size = FROM_P2
176 mtime = AMBIGUOUS_TIME
177 mtime = AMBIGUOUS_TIME
177 elif possibly_dirty:
178 elif possibly_dirty:
178 state = b'n'
179 state = b'n'
179 size = NONNORMAL
180 size = NONNORMAL
180 mtime = AMBIGUOUS_TIME
181 mtime = AMBIGUOUS_TIME
181 else:
182 else:
182 assert state != b'a'
183 assert state != b'a'
183 assert size != FROM_P2
184 assert size != FROM_P2
184 assert size != NONNORMAL
185 assert size != NONNORMAL
185 size = size & rangemask
186 size = size & rangemask
186 mtime = mtime & rangemask
187 mtime = mtime & rangemask
187 assert state is not None
188 assert state is not None
188 assert size is not None
189 assert size is not None
189 assert mtime is not None
190 assert mtime is not None
190 old_entry = self.get(f)
191 old_entry = self.get(f)
191 if (
192 if (
192 old_entry is None or old_entry.removed
193 old_entry is None or old_entry.removed
193 ) and "_dirs" in self.__dict__:
194 ) and "_dirs" in self.__dict__:
194 self._dirs.addpath(f)
195 self._dirs.addpath(f)
195 if old_entry is None and "_alldirs" in self.__dict__:
196 if old_entry is None and "_alldirs" in self.__dict__:
196 self._alldirs.addpath(f)
197 self._alldirs.addpath(f)
197 self._map[f] = dirstatetuple(state, mode, size, mtime)
198 self._map[f] = dirstatetuple(state, mode, size, mtime)
198 if state != b'n' or mtime == AMBIGUOUS_TIME:
199 if state != b'n' or mtime == AMBIGUOUS_TIME:
199 self.nonnormalset.add(f)
200 self.nonnormalset.add(f)
200 if size == FROM_P2:
201 if size == FROM_P2:
201 self.otherparentset.add(f)
202 self.otherparentset.add(f)
202
203
203 def removefile(self, f, in_merge=False):
204 def removefile(self, f, in_merge=False):
204 """
205 """
205 Mark a file as removed in the dirstate.
206 Mark a file as removed in the dirstate.
206
207
207 The `size` parameter is used to store sentinel values that indicate
208 The `size` parameter is used to store sentinel values that indicate
208 the file's previous state. In the future, we should refactor this
209 the file's previous state. In the future, we should refactor this
209 to be more explicit about what that state is.
210 to be more explicit about what that state is.
210 """
211 """
211 entry = self.get(f)
212 entry = self.get(f)
212 size = 0
213 size = 0
213 if in_merge:
214 if in_merge:
214 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
215 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
215 # during a merge. So I (marmoute) am not sure we need the
216 # during a merge. So I (marmoute) am not sure we need the
216 # conditionnal at all. Adding double checking this with assert
217 # conditionnal at all. Adding double checking this with assert
217 # would be nice.
218 # would be nice.
218 if entry is not None:
219 if entry is not None:
219 # backup the previous state
220 # backup the previous state
220 if entry.merged: # merge
221 if entry.merged: # merge
221 size = NONNORMAL
222 size = NONNORMAL
222 elif entry[0] == b'n' and entry.from_p2:
223 elif entry[0] == b'n' and entry.from_p2:
223 size = FROM_P2
224 size = FROM_P2
224 self.otherparentset.add(f)
225 self.otherparentset.add(f)
225 if size == 0:
226 if size == 0:
226 self.copymap.pop(f, None)
227 self.copymap.pop(f, None)
227
228
228 if entry is not None and entry[0] != b'r' and "_dirs" in self.__dict__:
229 if entry is not None and entry[0] != b'r' and "_dirs" in self.__dict__:
229 self._dirs.delpath(f)
230 self._dirs.delpath(f)
230 if entry is None and "_alldirs" in self.__dict__:
231 if entry is None and "_alldirs" in self.__dict__:
231 self._alldirs.addpath(f)
232 self._alldirs.addpath(f)
232 if "filefoldmap" in self.__dict__:
233 if "filefoldmap" in self.__dict__:
233 normed = util.normcase(f)
234 normed = util.normcase(f)
234 self.filefoldmap.pop(normed, None)
235 self.filefoldmap.pop(normed, None)
235 self._map[f] = dirstatetuple(b'r', 0, size, 0)
236 self._map[f] = dirstatetuple(b'r', 0, size, 0)
236 self.nonnormalset.add(f)
237 self.nonnormalset.add(f)
237
238
238 def dropfile(self, f, oldstate):
239 def dropfile(self, f, oldstate):
239 """
240 """
240 Remove a file from the dirstate. Returns True if the file was
241 Remove a file from the dirstate. Returns True if the file was
241 previously recorded.
242 previously recorded.
242 """
243 """
243 exists = self._map.pop(f, None) is not None
244 exists = self._map.pop(f, None) is not None
244 if exists:
245 if exists:
245 if oldstate != b"r" and "_dirs" in self.__dict__:
246 if oldstate != b"r" and "_dirs" in self.__dict__:
246 self._dirs.delpath(f)
247 self._dirs.delpath(f)
247 if "_alldirs" in self.__dict__:
248 if "_alldirs" in self.__dict__:
248 self._alldirs.delpath(f)
249 self._alldirs.delpath(f)
249 if "filefoldmap" in self.__dict__:
250 if "filefoldmap" in self.__dict__:
250 normed = util.normcase(f)
251 normed = util.normcase(f)
251 self.filefoldmap.pop(normed, None)
252 self.filefoldmap.pop(normed, None)
252 self.nonnormalset.discard(f)
253 self.nonnormalset.discard(f)
253 return exists
254 return exists
254
255
255 def clearambiguoustimes(self, files, now):
256 def clearambiguoustimes(self, files, now):
256 for f in files:
257 for f in files:
257 e = self.get(f)
258 e = self.get(f)
258 if e is not None and e[0] == b'n' and e[3] == now:
259 if e is not None and e[0] == b'n' and e[3] == now:
259 self._map[f] = dirstatetuple(e[0], e[1], e[2], AMBIGUOUS_TIME)
260 self._map[f] = dirstatetuple(e[0], e[1], e[2], AMBIGUOUS_TIME)
260 self.nonnormalset.add(f)
261 self.nonnormalset.add(f)
261
262
262 def nonnormalentries(self):
263 def nonnormalentries(self):
263 '''Compute the nonnormal dirstate entries from the dmap'''
264 '''Compute the nonnormal dirstate entries from the dmap'''
264 try:
265 try:
265 return parsers.nonnormalotherparententries(self._map)
266 return parsers.nonnormalotherparententries(self._map)
266 except AttributeError:
267 except AttributeError:
267 nonnorm = set()
268 nonnorm = set()
268 otherparent = set()
269 otherparent = set()
269 for fname, e in pycompat.iteritems(self._map):
270 for fname, e in pycompat.iteritems(self._map):
270 if e[0] != b'n' or e[3] == AMBIGUOUS_TIME:
271 if e[0] != b'n' or e[3] == AMBIGUOUS_TIME:
271 nonnorm.add(fname)
272 nonnorm.add(fname)
272 if e[0] == b'n' and e[2] == FROM_P2:
273 if e[0] == b'n' and e[2] == FROM_P2:
273 otherparent.add(fname)
274 otherparent.add(fname)
274 return nonnorm, otherparent
275 return nonnorm, otherparent
275
276
276 @propertycache
277 @propertycache
277 def filefoldmap(self):
278 def filefoldmap(self):
278 """Returns a dictionary mapping normalized case paths to their
279 """Returns a dictionary mapping normalized case paths to their
279 non-normalized versions.
280 non-normalized versions.
280 """
281 """
281 try:
282 try:
282 makefilefoldmap = parsers.make_file_foldmap
283 makefilefoldmap = parsers.make_file_foldmap
283 except AttributeError:
284 except AttributeError:
284 pass
285 pass
285 else:
286 else:
286 return makefilefoldmap(
287 return makefilefoldmap(
287 self._map, util.normcasespec, util.normcasefallback
288 self._map, util.normcasespec, util.normcasefallback
288 )
289 )
289
290
290 f = {}
291 f = {}
291 normcase = util.normcase
292 normcase = util.normcase
292 for name, s in pycompat.iteritems(self._map):
293 for name, s in pycompat.iteritems(self._map):
293 if s[0] != b'r':
294 if s[0] != b'r':
294 f[normcase(name)] = name
295 f[normcase(name)] = name
295 f[b'.'] = b'.' # prevents useless util.fspath() invocation
296 f[b'.'] = b'.' # prevents useless util.fspath() invocation
296 return f
297 return f
297
298
298 def hastrackeddir(self, d):
299 def hastrackeddir(self, d):
299 """
300 """
300 Returns True if the dirstate contains a tracked (not removed) file
301 Returns True if the dirstate contains a tracked (not removed) file
301 in this directory.
302 in this directory.
302 """
303 """
303 return d in self._dirs
304 return d in self._dirs
304
305
305 def hasdir(self, d):
306 def hasdir(self, d):
306 """
307 """
307 Returns True if the dirstate contains a file (tracked or removed)
308 Returns True if the dirstate contains a file (tracked or removed)
308 in this directory.
309 in this directory.
309 """
310 """
310 return d in self._alldirs
311 return d in self._alldirs
311
312
312 @propertycache
313 @propertycache
313 def _dirs(self):
314 def _dirs(self):
314 return pathutil.dirs(self._map, b'r')
315 return pathutil.dirs(self._map, b'r')
315
316
316 @propertycache
317 @propertycache
317 def _alldirs(self):
318 def _alldirs(self):
318 return pathutil.dirs(self._map)
319 return pathutil.dirs(self._map)
319
320
320 def _opendirstatefile(self):
321 def _opendirstatefile(self):
321 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
322 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
322 if self._pendingmode is not None and self._pendingmode != mode:
323 if self._pendingmode is not None and self._pendingmode != mode:
323 fp.close()
324 fp.close()
324 raise error.Abort(
325 raise error.Abort(
325 _(b'working directory state may be changed parallelly')
326 _(b'working directory state may be changed parallelly')
326 )
327 )
327 self._pendingmode = mode
328 self._pendingmode = mode
328 return fp
329 return fp
329
330
330 def parents(self):
331 def parents(self):
331 if not self._parents:
332 if not self._parents:
332 try:
333 try:
333 fp = self._opendirstatefile()
334 fp = self._opendirstatefile()
334 st = fp.read(2 * self._nodelen)
335 st = fp.read(2 * self._nodelen)
335 fp.close()
336 fp.close()
336 except IOError as err:
337 except IOError as err:
337 if err.errno != errno.ENOENT:
338 if err.errno != errno.ENOENT:
338 raise
339 raise
339 # File doesn't exist, so the current state is empty
340 # File doesn't exist, so the current state is empty
340 st = b''
341 st = b''
341
342
342 l = len(st)
343 l = len(st)
343 if l == self._nodelen * 2:
344 if l == self._nodelen * 2:
344 self._parents = (
345 self._parents = (
345 st[: self._nodelen],
346 st[: self._nodelen],
346 st[self._nodelen : 2 * self._nodelen],
347 st[self._nodelen : 2 * self._nodelen],
347 )
348 )
348 elif l == 0:
349 elif l == 0:
349 self._parents = (
350 self._parents = (
350 self._nodeconstants.nullid,
351 self._nodeconstants.nullid,
351 self._nodeconstants.nullid,
352 self._nodeconstants.nullid,
352 )
353 )
353 else:
354 else:
354 raise error.Abort(
355 raise error.Abort(
355 _(b'working directory state appears damaged!')
356 _(b'working directory state appears damaged!')
356 )
357 )
357
358
358 return self._parents
359 return self._parents
359
360
360 def setparents(self, p1, p2):
361 def setparents(self, p1, p2):
361 self._parents = (p1, p2)
362 self._parents = (p1, p2)
362 self._dirtyparents = True
363 self._dirtyparents = True
363
364
364 def read(self):
365 def read(self):
365 # ignore HG_PENDING because identity is used only for writing
366 # ignore HG_PENDING because identity is used only for writing
366 self.identity = util.filestat.frompath(
367 self.identity = util.filestat.frompath(
367 self._opener.join(self._filename)
368 self._opener.join(self._filename)
368 )
369 )
369
370
370 try:
371 try:
371 fp = self._opendirstatefile()
372 fp = self._opendirstatefile()
372 try:
373 try:
373 st = fp.read()
374 st = fp.read()
374 finally:
375 finally:
375 fp.close()
376 fp.close()
376 except IOError as err:
377 except IOError as err:
377 if err.errno != errno.ENOENT:
378 if err.errno != errno.ENOENT:
378 raise
379 raise
379 return
380 return
380 if not st:
381 if not st:
381 return
382 return
382
383
383 if util.safehasattr(parsers, b'dict_new_presized'):
384 if util.safehasattr(parsers, b'dict_new_presized'):
384 # Make an estimate of the number of files in the dirstate based on
385 # Make an estimate of the number of files in the dirstate based on
385 # its size. This trades wasting some memory for avoiding costly
386 # its size. This trades wasting some memory for avoiding costly
386 # resizes. Each entry have a prefix of 17 bytes followed by one or
387 # resizes. Each entry have a prefix of 17 bytes followed by one or
387 # two path names. Studies on various large-scale real-world repositories
388 # two path names. Studies on various large-scale real-world repositories
388 # found 54 bytes a reasonable upper limit for the average path names.
389 # found 54 bytes a reasonable upper limit for the average path names.
389 # Copy entries are ignored for the sake of this estimate.
390 # Copy entries are ignored for the sake of this estimate.
390 self._map = parsers.dict_new_presized(len(st) // 71)
391 self._map = parsers.dict_new_presized(len(st) // 71)
391
392
392 # Python's garbage collector triggers a GC each time a certain number
393 # Python's garbage collector triggers a GC each time a certain number
393 # of container objects (the number being defined by
394 # of container objects (the number being defined by
394 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
395 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
395 # for each file in the dirstate. The C version then immediately marks
396 # for each file in the dirstate. The C version then immediately marks
396 # them as not to be tracked by the collector. However, this has no
397 # them as not to be tracked by the collector. However, this has no
397 # effect on when GCs are triggered, only on what objects the GC looks
398 # effect on when GCs are triggered, only on what objects the GC looks
398 # into. This means that O(number of files) GCs are unavoidable.
399 # into. This means that O(number of files) GCs are unavoidable.
399 # Depending on when in the process's lifetime the dirstate is parsed,
400 # Depending on when in the process's lifetime the dirstate is parsed,
400 # this can get very expensive. As a workaround, disable GC while
401 # this can get very expensive. As a workaround, disable GC while
401 # parsing the dirstate.
402 # parsing the dirstate.
402 #
403 #
403 # (we cannot decorate the function directly since it is in a C module)
404 # (we cannot decorate the function directly since it is in a C module)
404 parse_dirstate = util.nogc(parsers.parse_dirstate)
405 parse_dirstate = util.nogc(parsers.parse_dirstate)
405 p = parse_dirstate(self._map, self.copymap, st)
406 p = parse_dirstate(self._map, self.copymap, st)
406 if not self._dirtyparents:
407 if not self._dirtyparents:
407 self.setparents(*p)
408 self.setparents(*p)
408
409
409 # Avoid excess attribute lookups by fast pathing certain checks
410 # Avoid excess attribute lookups by fast pathing certain checks
410 self.__contains__ = self._map.__contains__
411 self.__contains__ = self._map.__contains__
411 self.__getitem__ = self._map.__getitem__
412 self.__getitem__ = self._map.__getitem__
412 self.get = self._map.get
413 self.get = self._map.get
413
414
414 def write(self, st, now):
415 def write(self, st, now):
415 st.write(
416 st.write(
416 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
417 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
417 )
418 )
418 st.close()
419 st.close()
419 self._dirtyparents = False
420 self._dirtyparents = False
420 self.nonnormalset, self.otherparentset = self.nonnormalentries()
421 self.nonnormalset, self.otherparentset = self.nonnormalentries()
421
422
422 @propertycache
423 @propertycache
423 def nonnormalset(self):
424 def nonnormalset(self):
424 nonnorm, otherparents = self.nonnormalentries()
425 nonnorm, otherparents = self.nonnormalentries()
425 self.otherparentset = otherparents
426 self.otherparentset = otherparents
426 return nonnorm
427 return nonnorm
427
428
428 @propertycache
429 @propertycache
429 def otherparentset(self):
430 def otherparentset(self):
430 nonnorm, otherparents = self.nonnormalentries()
431 nonnorm, otherparents = self.nonnormalentries()
431 self.nonnormalset = nonnorm
432 self.nonnormalset = nonnorm
432 return otherparents
433 return otherparents
433
434
434 def non_normal_or_other_parent_paths(self):
435 def non_normal_or_other_parent_paths(self):
435 return self.nonnormalset.union(self.otherparentset)
436 return self.nonnormalset.union(self.otherparentset)
436
437
437 @propertycache
438 @propertycache
438 def identity(self):
439 def identity(self):
439 self._map
440 self._map
440 return self.identity
441 return self.identity
441
442
442 @propertycache
443 @propertycache
443 def dirfoldmap(self):
444 def dirfoldmap(self):
444 f = {}
445 f = {}
445 normcase = util.normcase
446 normcase = util.normcase
446 for name in self._dirs:
447 for name in self._dirs:
447 f[normcase(name)] = name
448 f[normcase(name)] = name
448 return f
449 return f
449
450
450
451
451 if rustmod is not None:
452 if rustmod is not None:
452
453
453 class dirstatemap(object):
454 class dirstatemap(object):
454 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
455 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
455 self._use_dirstate_v2 = use_dirstate_v2
456 self._use_dirstate_v2 = use_dirstate_v2
456 self._nodeconstants = nodeconstants
457 self._nodeconstants = nodeconstants
457 self._ui = ui
458 self._ui = ui
458 self._opener = opener
459 self._opener = opener
459 self._root = root
460 self._root = root
460 self._filename = b'dirstate'
461 self._filename = b'dirstate'
461 self._nodelen = 20 # Also update Rust code when changing this!
462 self._nodelen = 20 # Also update Rust code when changing this!
462 self._parents = None
463 self._parents = None
463 self._dirtyparents = False
464 self._dirtyparents = False
464
465
465 # for consistent view between _pl() and _read() invocations
466 # for consistent view between _pl() and _read() invocations
466 self._pendingmode = None
467 self._pendingmode = None
467
468
468 self._use_dirstate_tree = self._ui.configbool(
469 self._use_dirstate_tree = self._ui.configbool(
469 b"experimental",
470 b"experimental",
470 b"dirstate-tree.in-memory",
471 b"dirstate-tree.in-memory",
471 False,
472 False,
472 )
473 )
473
474
474 def addfile(
475 def addfile(
475 self,
476 self,
476 f,
477 f,
477 state=None,
478 state=None,
478 mode=0,
479 mode=0,
479 size=None,
480 size=None,
480 mtime=None,
481 mtime=None,
481 added=False,
482 added=False,
482 merged=False,
483 merged=False,
483 from_p2=False,
484 from_p2=False,
484 possibly_dirty=False,
485 possibly_dirty=False,
485 ):
486 ):
486 return self._rustmap.addfile(
487 return self._rustmap.addfile(
487 f,
488 f,
488 state,
489 state,
489 mode,
490 mode,
490 size,
491 size,
491 mtime,
492 mtime,
492 added,
493 added,
493 merged,
494 merged,
494 from_p2,
495 from_p2,
495 possibly_dirty,
496 possibly_dirty,
496 )
497 )
497
498
498 def removefile(self, *args, **kwargs):
499 def removefile(self, *args, **kwargs):
499 return self._rustmap.removefile(*args, **kwargs)
500 return self._rustmap.removefile(*args, **kwargs)
500
501
501 def dropfile(self, *args, **kwargs):
502 def dropfile(self, *args, **kwargs):
502 return self._rustmap.dropfile(*args, **kwargs)
503 return self._rustmap.dropfile(*args, **kwargs)
503
504
504 def clearambiguoustimes(self, *args, **kwargs):
505 def clearambiguoustimes(self, *args, **kwargs):
505 return self._rustmap.clearambiguoustimes(*args, **kwargs)
506 return self._rustmap.clearambiguoustimes(*args, **kwargs)
506
507
507 def nonnormalentries(self):
508 def nonnormalentries(self):
508 return self._rustmap.nonnormalentries()
509 return self._rustmap.nonnormalentries()
509
510
510 def get(self, *args, **kwargs):
511 def get(self, *args, **kwargs):
511 return self._rustmap.get(*args, **kwargs)
512 return self._rustmap.get(*args, **kwargs)
512
513
513 @property
514 @property
514 def copymap(self):
515 def copymap(self):
515 return self._rustmap.copymap()
516 return self._rustmap.copymap()
516
517
517 def directories(self):
518 def directories(self):
518 return self._rustmap.directories()
519 return self._rustmap.directories()
519
520
520 def preload(self):
521 def preload(self):
521 self._rustmap
522 self._rustmap
522
523
523 def clear(self):
524 def clear(self):
524 self._rustmap.clear()
525 self._rustmap.clear()
525 self.setparents(
526 self.setparents(
526 self._nodeconstants.nullid, self._nodeconstants.nullid
527 self._nodeconstants.nullid, self._nodeconstants.nullid
527 )
528 )
528 util.clearcachedproperty(self, b"_dirs")
529 util.clearcachedproperty(self, b"_dirs")
529 util.clearcachedproperty(self, b"_alldirs")
530 util.clearcachedproperty(self, b"_alldirs")
530 util.clearcachedproperty(self, b"dirfoldmap")
531 util.clearcachedproperty(self, b"dirfoldmap")
531
532
532 def items(self):
533 def items(self):
533 return self._rustmap.items()
534 return self._rustmap.items()
534
535
535 def keys(self):
536 def keys(self):
536 return iter(self._rustmap)
537 return iter(self._rustmap)
537
538
538 def __contains__(self, key):
539 def __contains__(self, key):
539 return key in self._rustmap
540 return key in self._rustmap
540
541
541 def __getitem__(self, item):
542 def __getitem__(self, item):
542 return self._rustmap[item]
543 return self._rustmap[item]
543
544
544 def __len__(self):
545 def __len__(self):
545 return len(self._rustmap)
546 return len(self._rustmap)
546
547
547 def __iter__(self):
548 def __iter__(self):
548 return iter(self._rustmap)
549 return iter(self._rustmap)
549
550
550 # forward for python2,3 compat
551 # forward for python2,3 compat
551 iteritems = items
552 iteritems = items
552
553
553 def _opendirstatefile(self):
554 def _opendirstatefile(self):
554 fp, mode = txnutil.trypending(
555 fp, mode = txnutil.trypending(
555 self._root, self._opener, self._filename
556 self._root, self._opener, self._filename
556 )
557 )
557 if self._pendingmode is not None and self._pendingmode != mode:
558 if self._pendingmode is not None and self._pendingmode != mode:
558 fp.close()
559 fp.close()
559 raise error.Abort(
560 raise error.Abort(
560 _(b'working directory state may be changed parallelly')
561 _(b'working directory state may be changed parallelly')
561 )
562 )
562 self._pendingmode = mode
563 self._pendingmode = mode
563 return fp
564 return fp
564
565
565 def setparents(self, p1, p2):
566 def setparents(self, p1, p2):
566 self._parents = (p1, p2)
567 self._parents = (p1, p2)
567 self._dirtyparents = True
568 self._dirtyparents = True
568
569
569 def parents(self):
570 def parents(self):
570 if not self._parents:
571 if not self._parents:
571 if self._use_dirstate_v2:
572 if self._use_dirstate_v2:
572 offset = len(rustmod.V2_FORMAT_MARKER)
573 offset = len(rustmod.V2_FORMAT_MARKER)
573 else:
574 else:
574 offset = 0
575 offset = 0
575 read_len = offset + self._nodelen * 2
576 read_len = offset + self._nodelen * 2
576 try:
577 try:
577 fp = self._opendirstatefile()
578 fp = self._opendirstatefile()
578 st = fp.read(read_len)
579 st = fp.read(read_len)
579 fp.close()
580 fp.close()
580 except IOError as err:
581 except IOError as err:
581 if err.errno != errno.ENOENT:
582 if err.errno != errno.ENOENT:
582 raise
583 raise
583 # File doesn't exist, so the current state is empty
584 # File doesn't exist, so the current state is empty
584 st = b''
585 st = b''
585
586
586 l = len(st)
587 l = len(st)
587 if l == read_len:
588 if l == read_len:
588 st = st[offset:]
589 st = st[offset:]
589 self._parents = (
590 self._parents = (
590 st[: self._nodelen],
591 st[: self._nodelen],
591 st[self._nodelen : 2 * self._nodelen],
592 st[self._nodelen : 2 * self._nodelen],
592 )
593 )
593 elif l == 0:
594 elif l == 0:
594 self._parents = (
595 self._parents = (
595 self._nodeconstants.nullid,
596 self._nodeconstants.nullid,
596 self._nodeconstants.nullid,
597 self._nodeconstants.nullid,
597 )
598 )
598 else:
599 else:
599 raise error.Abort(
600 raise error.Abort(
600 _(b'working directory state appears damaged!')
601 _(b'working directory state appears damaged!')
601 )
602 )
602
603
603 return self._parents
604 return self._parents
604
605
605 @propertycache
606 @propertycache
606 def _rustmap(self):
607 def _rustmap(self):
607 """
608 """
608 Fills the Dirstatemap when called.
609 Fills the Dirstatemap when called.
609 """
610 """
610 # ignore HG_PENDING because identity is used only for writing
611 # ignore HG_PENDING because identity is used only for writing
611 self.identity = util.filestat.frompath(
612 self.identity = util.filestat.frompath(
612 self._opener.join(self._filename)
613 self._opener.join(self._filename)
613 )
614 )
614
615
615 try:
616 try:
616 fp = self._opendirstatefile()
617 fp = self._opendirstatefile()
617 try:
618 try:
618 st = fp.read()
619 st = fp.read()
619 finally:
620 finally:
620 fp.close()
621 fp.close()
621 except IOError as err:
622 except IOError as err:
622 if err.errno != errno.ENOENT:
623 if err.errno != errno.ENOENT:
623 raise
624 raise
624 st = b''
625 st = b''
625
626
626 self._rustmap, parents = rustmod.DirstateMap.new(
627 self._rustmap, parents = rustmod.DirstateMap.new(
627 self._use_dirstate_tree, self._use_dirstate_v2, st
628 self._use_dirstate_tree, self._use_dirstate_v2, st
628 )
629 )
629
630
630 if parents and not self._dirtyparents:
631 if parents and not self._dirtyparents:
631 self.setparents(*parents)
632 self.setparents(*parents)
632
633
633 self.__contains__ = self._rustmap.__contains__
634 self.__contains__ = self._rustmap.__contains__
634 self.__getitem__ = self._rustmap.__getitem__
635 self.__getitem__ = self._rustmap.__getitem__
635 self.get = self._rustmap.get
636 self.get = self._rustmap.get
636 return self._rustmap
637 return self._rustmap
637
638
638 def write(self, st, now):
639 def write(self, st, now):
639 parents = self.parents()
640 parents = self.parents()
640 packed = self._rustmap.write(
641 packed = self._rustmap.write(
641 self._use_dirstate_v2, parents[0], parents[1], now
642 self._use_dirstate_v2, parents[0], parents[1], now
642 )
643 )
643 st.write(packed)
644 st.write(packed)
644 st.close()
645 st.close()
645 self._dirtyparents = False
646 self._dirtyparents = False
646
647
647 @propertycache
648 @propertycache
648 def filefoldmap(self):
649 def filefoldmap(self):
649 """Returns a dictionary mapping normalized case paths to their
650 """Returns a dictionary mapping normalized case paths to their
650 non-normalized versions.
651 non-normalized versions.
651 """
652 """
652 return self._rustmap.filefoldmapasdict()
653 return self._rustmap.filefoldmapasdict()
653
654
654 def hastrackeddir(self, d):
655 def hastrackeddir(self, d):
655 return self._rustmap.hastrackeddir(d)
656 return self._rustmap.hastrackeddir(d)
656
657
657 def hasdir(self, d):
658 def hasdir(self, d):
658 return self._rustmap.hasdir(d)
659 return self._rustmap.hasdir(d)
659
660
660 @propertycache
661 @propertycache
661 def identity(self):
662 def identity(self):
662 self._rustmap
663 self._rustmap
663 return self.identity
664 return self.identity
664
665
665 @property
666 @property
666 def nonnormalset(self):
667 def nonnormalset(self):
667 nonnorm = self._rustmap.non_normal_entries()
668 nonnorm = self._rustmap.non_normal_entries()
668 return nonnorm
669 return nonnorm
669
670
670 @propertycache
671 @propertycache
671 def otherparentset(self):
672 def otherparentset(self):
672 otherparents = self._rustmap.other_parent_entries()
673 otherparents = self._rustmap.other_parent_entries()
673 return otherparents
674 return otherparents
674
675
675 def non_normal_or_other_parent_paths(self):
676 def non_normal_or_other_parent_paths(self):
676 return self._rustmap.non_normal_or_other_parent_paths()
677 return self._rustmap.non_normal_or_other_parent_paths()
677
678
678 @propertycache
679 @propertycache
679 def dirfoldmap(self):
680 def dirfoldmap(self):
680 f = {}
681 f = {}
681 normcase = util.normcase
682 normcase = util.normcase
682 for name, _pseudo_entry in self.directories():
683 for name, _pseudo_entry in self.directories():
683 f[normcase(name)] = name
684 f[normcase(name)] = name
684 return f
685 return f
@@ -1,476 +1,477 b''
1 // dirstate_map.rs
1 // dirstate_map.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::dirstate::parsers::Timestamp;
8 use crate::dirstate::parsers::Timestamp;
9 use crate::{
9 use crate::{
10 dirstate::EntryState,
10 dirstate::EntryState,
11 dirstate::MTIME_UNSET,
11 dirstate::MTIME_UNSET,
12 dirstate::SIZE_FROM_OTHER_PARENT,
12 dirstate::SIZE_FROM_OTHER_PARENT,
13 dirstate::SIZE_NON_NORMAL,
13 dirstate::SIZE_NON_NORMAL,
14 dirstate::V1_RANGEMASK,
14 dirstate::V1_RANGEMASK,
15 pack_dirstate, parse_dirstate,
15 pack_dirstate, parse_dirstate,
16 utils::hg_path::{HgPath, HgPathBuf},
16 utils::hg_path::{HgPath, HgPathBuf},
17 CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
17 CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
18 StateMap,
18 StateMap,
19 };
19 };
20 use micro_timer::timed;
20 use micro_timer::timed;
21 use std::collections::HashSet;
21 use std::collections::HashSet;
22 use std::iter::FromIterator;
22 use std::iter::FromIterator;
23 use std::ops::Deref;
23 use std::ops::Deref;
24
24
25 #[derive(Default)]
25 #[derive(Default)]
26 pub struct DirstateMap {
26 pub struct DirstateMap {
27 state_map: StateMap,
27 state_map: StateMap,
28 pub copy_map: CopyMap,
28 pub copy_map: CopyMap,
29 pub dirs: Option<DirsMultiset>,
29 pub dirs: Option<DirsMultiset>,
30 pub all_dirs: Option<DirsMultiset>,
30 pub all_dirs: Option<DirsMultiset>,
31 non_normal_set: Option<HashSet<HgPathBuf>>,
31 non_normal_set: Option<HashSet<HgPathBuf>>,
32 other_parent_set: Option<HashSet<HgPathBuf>>,
32 other_parent_set: Option<HashSet<HgPathBuf>>,
33 }
33 }
34
34
35 /// Should only really be used in python interface code, for clarity
35 /// Should only really be used in python interface code, for clarity
36 impl Deref for DirstateMap {
36 impl Deref for DirstateMap {
37 type Target = StateMap;
37 type Target = StateMap;
38
38
39 fn deref(&self) -> &Self::Target {
39 fn deref(&self) -> &Self::Target {
40 &self.state_map
40 &self.state_map
41 }
41 }
42 }
42 }
43
43
44 impl FromIterator<(HgPathBuf, DirstateEntry)> for DirstateMap {
44 impl FromIterator<(HgPathBuf, DirstateEntry)> for DirstateMap {
45 fn from_iter<I: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
45 fn from_iter<I: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
46 iter: I,
46 iter: I,
47 ) -> Self {
47 ) -> Self {
48 Self {
48 Self {
49 state_map: iter.into_iter().collect(),
49 state_map: iter.into_iter().collect(),
50 ..Self::default()
50 ..Self::default()
51 }
51 }
52 }
52 }
53 }
53 }
54
54
55 impl DirstateMap {
55 impl DirstateMap {
56 pub fn new() -> Self {
56 pub fn new() -> Self {
57 Self::default()
57 Self::default()
58 }
58 }
59
59
60 pub fn clear(&mut self) {
60 pub fn clear(&mut self) {
61 self.state_map = StateMap::default();
61 self.state_map = StateMap::default();
62 self.copy_map.clear();
62 self.copy_map.clear();
63 self.non_normal_set = None;
63 self.non_normal_set = None;
64 self.other_parent_set = None;
64 self.other_parent_set = None;
65 }
65 }
66
66
67 /// Add a tracked file to the dirstate
67 /// Add a tracked file to the dirstate
68 pub fn add_file(
68 pub fn add_file(
69 &mut self,
69 &mut self,
70 filename: &HgPath,
70 filename: &HgPath,
71 entry: DirstateEntry,
71 entry: DirstateEntry,
72 // XXX once the dust settle this should probably become an enum
72 // XXX once the dust settle this should probably become an enum
73 added: bool,
73 added: bool,
74 merged: bool,
74 merged: bool,
75 from_p2: bool,
75 from_p2: bool,
76 possibly_dirty: bool,
76 possibly_dirty: bool,
77 ) -> Result<(), DirstateError> {
77 ) -> Result<(), DirstateError> {
78 let mut entry = entry;
78 let mut entry = entry;
79 if added {
79 if added {
80 assert!(!merged);
80 assert!(!merged);
81 assert!(!possibly_dirty);
81 assert!(!possibly_dirty);
82 assert!(!from_p2);
82 assert!(!from_p2);
83 entry.state = EntryState::Added;
83 entry.state = EntryState::Added;
84 entry.size = SIZE_NON_NORMAL;
84 entry.size = SIZE_NON_NORMAL;
85 entry.mtime = MTIME_UNSET;
85 entry.mtime = MTIME_UNSET;
86 } else if merged {
86 } else if merged {
87 assert!(!possibly_dirty);
87 assert!(!possibly_dirty);
88 assert!(!from_p2);
88 assert!(!from_p2);
89 entry.state = EntryState::Merged;
89 entry.state = EntryState::Merged;
90 entry.size = SIZE_FROM_OTHER_PARENT;
90 entry.size = SIZE_FROM_OTHER_PARENT;
91 entry.mtime = MTIME_UNSET;
91 entry.mtime = MTIME_UNSET;
92 } else if from_p2 {
92 } else if from_p2 {
93 assert!(!possibly_dirty);
93 assert!(!possibly_dirty);
94 entry.state = EntryState::Normal;
94 entry.size = SIZE_FROM_OTHER_PARENT;
95 entry.size = SIZE_FROM_OTHER_PARENT;
95 entry.mtime = MTIME_UNSET;
96 entry.mtime = MTIME_UNSET;
96 } else if possibly_dirty {
97 } else if possibly_dirty {
97 entry.state = EntryState::Normal;
98 entry.state = EntryState::Normal;
98 entry.size = SIZE_NON_NORMAL;
99 entry.size = SIZE_NON_NORMAL;
99 entry.mtime = MTIME_UNSET;
100 entry.mtime = MTIME_UNSET;
100 } else {
101 } else {
101 entry.size = entry.size & V1_RANGEMASK;
102 entry.size = entry.size & V1_RANGEMASK;
102 entry.mtime = entry.mtime & V1_RANGEMASK;
103 entry.mtime = entry.mtime & V1_RANGEMASK;
103 }
104 }
104 let old_state = match self.get(filename) {
105 let old_state = match self.get(filename) {
105 Some(e) => e.state,
106 Some(e) => e.state,
106 None => EntryState::Unknown,
107 None => EntryState::Unknown,
107 };
108 };
108 if old_state == EntryState::Unknown || old_state == EntryState::Removed
109 if old_state == EntryState::Unknown || old_state == EntryState::Removed
109 {
110 {
110 if let Some(ref mut dirs) = self.dirs {
111 if let Some(ref mut dirs) = self.dirs {
111 dirs.add_path(filename)?;
112 dirs.add_path(filename)?;
112 }
113 }
113 }
114 }
114 if old_state == EntryState::Unknown {
115 if old_state == EntryState::Unknown {
115 if let Some(ref mut all_dirs) = self.all_dirs {
116 if let Some(ref mut all_dirs) = self.all_dirs {
116 all_dirs.add_path(filename)?;
117 all_dirs.add_path(filename)?;
117 }
118 }
118 }
119 }
119 self.state_map.insert(filename.to_owned(), entry.to_owned());
120 self.state_map.insert(filename.to_owned(), entry.to_owned());
120
121
121 if entry.is_non_normal() {
122 if entry.is_non_normal() {
122 self.get_non_normal_other_parent_entries()
123 self.get_non_normal_other_parent_entries()
123 .0
124 .0
124 .insert(filename.to_owned());
125 .insert(filename.to_owned());
125 }
126 }
126
127
127 if entry.is_from_other_parent() {
128 if entry.is_from_other_parent() {
128 self.get_non_normal_other_parent_entries()
129 self.get_non_normal_other_parent_entries()
129 .1
130 .1
130 .insert(filename.to_owned());
131 .insert(filename.to_owned());
131 }
132 }
132 Ok(())
133 Ok(())
133 }
134 }
134
135
135 /// Mark a file as removed in the dirstate.
136 /// Mark a file as removed in the dirstate.
136 ///
137 ///
137 /// The `size` parameter is used to store sentinel values that indicate
138 /// The `size` parameter is used to store sentinel values that indicate
138 /// the file's previous state. In the future, we should refactor this
139 /// the file's previous state. In the future, we should refactor this
139 /// to be more explicit about what that state is.
140 /// to be more explicit about what that state is.
140 pub fn remove_file(
141 pub fn remove_file(
141 &mut self,
142 &mut self,
142 filename: &HgPath,
143 filename: &HgPath,
143 in_merge: bool,
144 in_merge: bool,
144 ) -> Result<(), DirstateError> {
145 ) -> Result<(), DirstateError> {
145 let old_entry_opt = self.get(filename);
146 let old_entry_opt = self.get(filename);
146 let old_state = match old_entry_opt {
147 let old_state = match old_entry_opt {
147 Some(e) => e.state,
148 Some(e) => e.state,
148 None => EntryState::Unknown,
149 None => EntryState::Unknown,
149 };
150 };
150 let mut size = 0;
151 let mut size = 0;
151 if in_merge {
152 if in_merge {
152 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
153 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
153 // during a merge. So I (marmoute) am not sure we need the
154 // during a merge. So I (marmoute) am not sure we need the
154 // conditionnal at all. Adding double checking this with assert
155 // conditionnal at all. Adding double checking this with assert
155 // would be nice.
156 // would be nice.
156 if let Some(old_entry) = old_entry_opt {
157 if let Some(old_entry) = old_entry_opt {
157 // backup the previous state
158 // backup the previous state
158 if old_entry.state == EntryState::Merged {
159 if old_entry.state == EntryState::Merged {
159 size = SIZE_NON_NORMAL;
160 size = SIZE_NON_NORMAL;
160 } else if old_entry.state == EntryState::Normal
161 } else if old_entry.state == EntryState::Normal
161 && old_entry.size == SIZE_FROM_OTHER_PARENT
162 && old_entry.size == SIZE_FROM_OTHER_PARENT
162 {
163 {
163 // other parent
164 // other parent
164 size = SIZE_FROM_OTHER_PARENT;
165 size = SIZE_FROM_OTHER_PARENT;
165 self.get_non_normal_other_parent_entries()
166 self.get_non_normal_other_parent_entries()
166 .1
167 .1
167 .insert(filename.to_owned());
168 .insert(filename.to_owned());
168 }
169 }
169 }
170 }
170 }
171 }
171 if old_state != EntryState::Unknown && old_state != EntryState::Removed
172 if old_state != EntryState::Unknown && old_state != EntryState::Removed
172 {
173 {
173 if let Some(ref mut dirs) = self.dirs {
174 if let Some(ref mut dirs) = self.dirs {
174 dirs.delete_path(filename)?;
175 dirs.delete_path(filename)?;
175 }
176 }
176 }
177 }
177 if old_state == EntryState::Unknown {
178 if old_state == EntryState::Unknown {
178 if let Some(ref mut all_dirs) = self.all_dirs {
179 if let Some(ref mut all_dirs) = self.all_dirs {
179 all_dirs.add_path(filename)?;
180 all_dirs.add_path(filename)?;
180 }
181 }
181 }
182 }
182 if size == 0 {
183 if size == 0 {
183 self.copy_map.remove(filename);
184 self.copy_map.remove(filename);
184 }
185 }
185
186
186 self.state_map.insert(
187 self.state_map.insert(
187 filename.to_owned(),
188 filename.to_owned(),
188 DirstateEntry {
189 DirstateEntry {
189 state: EntryState::Removed,
190 state: EntryState::Removed,
190 mode: 0,
191 mode: 0,
191 size,
192 size,
192 mtime: 0,
193 mtime: 0,
193 },
194 },
194 );
195 );
195 self.get_non_normal_other_parent_entries()
196 self.get_non_normal_other_parent_entries()
196 .0
197 .0
197 .insert(filename.to_owned());
198 .insert(filename.to_owned());
198 Ok(())
199 Ok(())
199 }
200 }
200
201
201 /// Remove a file from the dirstate.
202 /// Remove a file from the dirstate.
202 /// Returns `true` if the file was previously recorded.
203 /// Returns `true` if the file was previously recorded.
203 pub fn drop_file(
204 pub fn drop_file(
204 &mut self,
205 &mut self,
205 filename: &HgPath,
206 filename: &HgPath,
206 old_state: EntryState,
207 old_state: EntryState,
207 ) -> Result<bool, DirstateError> {
208 ) -> Result<bool, DirstateError> {
208 let exists = self.state_map.remove(filename).is_some();
209 let exists = self.state_map.remove(filename).is_some();
209
210
210 if exists {
211 if exists {
211 if old_state != EntryState::Removed {
212 if old_state != EntryState::Removed {
212 if let Some(ref mut dirs) = self.dirs {
213 if let Some(ref mut dirs) = self.dirs {
213 dirs.delete_path(filename)?;
214 dirs.delete_path(filename)?;
214 }
215 }
215 }
216 }
216 if let Some(ref mut all_dirs) = self.all_dirs {
217 if let Some(ref mut all_dirs) = self.all_dirs {
217 all_dirs.delete_path(filename)?;
218 all_dirs.delete_path(filename)?;
218 }
219 }
219 }
220 }
220 self.get_non_normal_other_parent_entries()
221 self.get_non_normal_other_parent_entries()
221 .0
222 .0
222 .remove(filename);
223 .remove(filename);
223
224
224 Ok(exists)
225 Ok(exists)
225 }
226 }
226
227
227 pub fn clear_ambiguous_times(
228 pub fn clear_ambiguous_times(
228 &mut self,
229 &mut self,
229 filenames: Vec<HgPathBuf>,
230 filenames: Vec<HgPathBuf>,
230 now: i32,
231 now: i32,
231 ) {
232 ) {
232 for filename in filenames {
233 for filename in filenames {
233 if let Some(entry) = self.state_map.get_mut(&filename) {
234 if let Some(entry) = self.state_map.get_mut(&filename) {
234 if entry.clear_ambiguous_mtime(now) {
235 if entry.clear_ambiguous_mtime(now) {
235 self.get_non_normal_other_parent_entries()
236 self.get_non_normal_other_parent_entries()
236 .0
237 .0
237 .insert(filename.to_owned());
238 .insert(filename.to_owned());
238 }
239 }
239 }
240 }
240 }
241 }
241 }
242 }
242
243
243 pub fn non_normal_entries_remove(&mut self, key: impl AsRef<HgPath>) {
244 pub fn non_normal_entries_remove(&mut self, key: impl AsRef<HgPath>) {
244 self.get_non_normal_other_parent_entries()
245 self.get_non_normal_other_parent_entries()
245 .0
246 .0
246 .remove(key.as_ref());
247 .remove(key.as_ref());
247 }
248 }
248
249
249 pub fn non_normal_entries_union(
250 pub fn non_normal_entries_union(
250 &mut self,
251 &mut self,
251 other: HashSet<HgPathBuf>,
252 other: HashSet<HgPathBuf>,
252 ) -> Vec<HgPathBuf> {
253 ) -> Vec<HgPathBuf> {
253 self.get_non_normal_other_parent_entries()
254 self.get_non_normal_other_parent_entries()
254 .0
255 .0
255 .union(&other)
256 .union(&other)
256 .map(ToOwned::to_owned)
257 .map(ToOwned::to_owned)
257 .collect()
258 .collect()
258 }
259 }
259
260
260 pub fn get_non_normal_other_parent_entries(
261 pub fn get_non_normal_other_parent_entries(
261 &mut self,
262 &mut self,
262 ) -> (&mut HashSet<HgPathBuf>, &mut HashSet<HgPathBuf>) {
263 ) -> (&mut HashSet<HgPathBuf>, &mut HashSet<HgPathBuf>) {
263 self.set_non_normal_other_parent_entries(false);
264 self.set_non_normal_other_parent_entries(false);
264 (
265 (
265 self.non_normal_set.as_mut().unwrap(),
266 self.non_normal_set.as_mut().unwrap(),
266 self.other_parent_set.as_mut().unwrap(),
267 self.other_parent_set.as_mut().unwrap(),
267 )
268 )
268 }
269 }
269
270
270 /// Useful to get immutable references to those sets in contexts where
271 /// Useful to get immutable references to those sets in contexts where
271 /// you only have an immutable reference to the `DirstateMap`, like when
272 /// you only have an immutable reference to the `DirstateMap`, like when
272 /// sharing references with Python.
273 /// sharing references with Python.
273 ///
274 ///
274 /// TODO, get rid of this along with the other "setter/getter" stuff when
275 /// TODO, get rid of this along with the other "setter/getter" stuff when
275 /// a nice typestate plan is defined.
276 /// a nice typestate plan is defined.
276 ///
277 ///
277 /// # Panics
278 /// # Panics
278 ///
279 ///
279 /// Will panic if either set is `None`.
280 /// Will panic if either set is `None`.
280 pub fn get_non_normal_other_parent_entries_panic(
281 pub fn get_non_normal_other_parent_entries_panic(
281 &self,
282 &self,
282 ) -> (&HashSet<HgPathBuf>, &HashSet<HgPathBuf>) {
283 ) -> (&HashSet<HgPathBuf>, &HashSet<HgPathBuf>) {
283 (
284 (
284 self.non_normal_set.as_ref().unwrap(),
285 self.non_normal_set.as_ref().unwrap(),
285 self.other_parent_set.as_ref().unwrap(),
286 self.other_parent_set.as_ref().unwrap(),
286 )
287 )
287 }
288 }
288
289
289 pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
290 pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
290 if !force
291 if !force
291 && self.non_normal_set.is_some()
292 && self.non_normal_set.is_some()
292 && self.other_parent_set.is_some()
293 && self.other_parent_set.is_some()
293 {
294 {
294 return;
295 return;
295 }
296 }
296 let mut non_normal = HashSet::new();
297 let mut non_normal = HashSet::new();
297 let mut other_parent = HashSet::new();
298 let mut other_parent = HashSet::new();
298
299
299 for (filename, entry) in self.state_map.iter() {
300 for (filename, entry) in self.state_map.iter() {
300 if entry.is_non_normal() {
301 if entry.is_non_normal() {
301 non_normal.insert(filename.to_owned());
302 non_normal.insert(filename.to_owned());
302 }
303 }
303 if entry.is_from_other_parent() {
304 if entry.is_from_other_parent() {
304 other_parent.insert(filename.to_owned());
305 other_parent.insert(filename.to_owned());
305 }
306 }
306 }
307 }
307 self.non_normal_set = Some(non_normal);
308 self.non_normal_set = Some(non_normal);
308 self.other_parent_set = Some(other_parent);
309 self.other_parent_set = Some(other_parent);
309 }
310 }
310
311
311 /// Both of these setters and their uses appear to be the simplest way to
312 /// Both of these setters and their uses appear to be the simplest way to
312 /// emulate a Python lazy property, but it is ugly and unidiomatic.
313 /// emulate a Python lazy property, but it is ugly and unidiomatic.
313 /// TODO One day, rewriting this struct using the typestate might be a
314 /// TODO One day, rewriting this struct using the typestate might be a
314 /// good idea.
315 /// good idea.
315 pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
316 pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
316 if self.all_dirs.is_none() {
317 if self.all_dirs.is_none() {
317 self.all_dirs = Some(DirsMultiset::from_dirstate(
318 self.all_dirs = Some(DirsMultiset::from_dirstate(
318 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
319 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
319 None,
320 None,
320 )?);
321 )?);
321 }
322 }
322 Ok(())
323 Ok(())
323 }
324 }
324
325
325 pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
326 pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
326 if self.dirs.is_none() {
327 if self.dirs.is_none() {
327 self.dirs = Some(DirsMultiset::from_dirstate(
328 self.dirs = Some(DirsMultiset::from_dirstate(
328 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
329 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
329 Some(EntryState::Removed),
330 Some(EntryState::Removed),
330 )?);
331 )?);
331 }
332 }
332 Ok(())
333 Ok(())
333 }
334 }
334
335
335 pub fn has_tracked_dir(
336 pub fn has_tracked_dir(
336 &mut self,
337 &mut self,
337 directory: &HgPath,
338 directory: &HgPath,
338 ) -> Result<bool, DirstateError> {
339 ) -> Result<bool, DirstateError> {
339 self.set_dirs()?;
340 self.set_dirs()?;
340 Ok(self.dirs.as_ref().unwrap().contains(directory))
341 Ok(self.dirs.as_ref().unwrap().contains(directory))
341 }
342 }
342
343
343 pub fn has_dir(
344 pub fn has_dir(
344 &mut self,
345 &mut self,
345 directory: &HgPath,
346 directory: &HgPath,
346 ) -> Result<bool, DirstateError> {
347 ) -> Result<bool, DirstateError> {
347 self.set_all_dirs()?;
348 self.set_all_dirs()?;
348 Ok(self.all_dirs.as_ref().unwrap().contains(directory))
349 Ok(self.all_dirs.as_ref().unwrap().contains(directory))
349 }
350 }
350
351
351 #[timed]
352 #[timed]
352 pub fn read(
353 pub fn read(
353 &mut self,
354 &mut self,
354 file_contents: &[u8],
355 file_contents: &[u8],
355 ) -> Result<Option<DirstateParents>, DirstateError> {
356 ) -> Result<Option<DirstateParents>, DirstateError> {
356 if file_contents.is_empty() {
357 if file_contents.is_empty() {
357 return Ok(None);
358 return Ok(None);
358 }
359 }
359
360
360 let (parents, entries, copies) = parse_dirstate(file_contents)?;
361 let (parents, entries, copies) = parse_dirstate(file_contents)?;
361 self.state_map.extend(
362 self.state_map.extend(
362 entries
363 entries
363 .into_iter()
364 .into_iter()
364 .map(|(path, entry)| (path.to_owned(), entry)),
365 .map(|(path, entry)| (path.to_owned(), entry)),
365 );
366 );
366 self.copy_map.extend(
367 self.copy_map.extend(
367 copies
368 copies
368 .into_iter()
369 .into_iter()
369 .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
370 .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
370 );
371 );
371 Ok(Some(parents.clone()))
372 Ok(Some(parents.clone()))
372 }
373 }
373
374
374 pub fn pack(
375 pub fn pack(
375 &mut self,
376 &mut self,
376 parents: DirstateParents,
377 parents: DirstateParents,
377 now: Timestamp,
378 now: Timestamp,
378 ) -> Result<Vec<u8>, DirstateError> {
379 ) -> Result<Vec<u8>, DirstateError> {
379 let packed =
380 let packed =
380 pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
381 pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
381
382
382 self.set_non_normal_other_parent_entries(true);
383 self.set_non_normal_other_parent_entries(true);
383 Ok(packed)
384 Ok(packed)
384 }
385 }
385 }
386 }
386
387
387 #[cfg(test)]
388 #[cfg(test)]
388 mod tests {
389 mod tests {
389 use super::*;
390 use super::*;
390
391
391 #[test]
392 #[test]
392 fn test_dirs_multiset() {
393 fn test_dirs_multiset() {
393 let mut map = DirstateMap::new();
394 let mut map = DirstateMap::new();
394 assert!(map.dirs.is_none());
395 assert!(map.dirs.is_none());
395 assert!(map.all_dirs.is_none());
396 assert!(map.all_dirs.is_none());
396
397
397 assert_eq!(map.has_dir(HgPath::new(b"nope")).unwrap(), false);
398 assert_eq!(map.has_dir(HgPath::new(b"nope")).unwrap(), false);
398 assert!(map.all_dirs.is_some());
399 assert!(map.all_dirs.is_some());
399 assert!(map.dirs.is_none());
400 assert!(map.dirs.is_none());
400
401
401 assert_eq!(map.has_tracked_dir(HgPath::new(b"nope")).unwrap(), false);
402 assert_eq!(map.has_tracked_dir(HgPath::new(b"nope")).unwrap(), false);
402 assert!(map.dirs.is_some());
403 assert!(map.dirs.is_some());
403 }
404 }
404
405
405 #[test]
406 #[test]
406 fn test_add_file() {
407 fn test_add_file() {
407 let mut map = DirstateMap::new();
408 let mut map = DirstateMap::new();
408
409
409 assert_eq!(0, map.len());
410 assert_eq!(0, map.len());
410
411
411 map.add_file(
412 map.add_file(
412 HgPath::new(b"meh"),
413 HgPath::new(b"meh"),
413 DirstateEntry {
414 DirstateEntry {
414 state: EntryState::Normal,
415 state: EntryState::Normal,
415 mode: 1337,
416 mode: 1337,
416 mtime: 1337,
417 mtime: 1337,
417 size: 1337,
418 size: 1337,
418 },
419 },
419 false,
420 false,
420 false,
421 false,
421 false,
422 false,
422 false,
423 false,
423 )
424 )
424 .unwrap();
425 .unwrap();
425
426
426 assert_eq!(1, map.len());
427 assert_eq!(1, map.len());
427 assert_eq!(0, map.get_non_normal_other_parent_entries().0.len());
428 assert_eq!(0, map.get_non_normal_other_parent_entries().0.len());
428 assert_eq!(0, map.get_non_normal_other_parent_entries().1.len());
429 assert_eq!(0, map.get_non_normal_other_parent_entries().1.len());
429 }
430 }
430
431
431 #[test]
432 #[test]
432 fn test_non_normal_other_parent_entries() {
433 fn test_non_normal_other_parent_entries() {
433 let mut map: DirstateMap = [
434 let mut map: DirstateMap = [
434 (b"f1", (EntryState::Removed, 1337, 1337, 1337)),
435 (b"f1", (EntryState::Removed, 1337, 1337, 1337)),
435 (b"f2", (EntryState::Normal, 1337, 1337, -1)),
436 (b"f2", (EntryState::Normal, 1337, 1337, -1)),
436 (b"f3", (EntryState::Normal, 1337, 1337, 1337)),
437 (b"f3", (EntryState::Normal, 1337, 1337, 1337)),
437 (b"f4", (EntryState::Normal, 1337, -2, 1337)),
438 (b"f4", (EntryState::Normal, 1337, -2, 1337)),
438 (b"f5", (EntryState::Added, 1337, 1337, 1337)),
439 (b"f5", (EntryState::Added, 1337, 1337, 1337)),
439 (b"f6", (EntryState::Added, 1337, 1337, -1)),
440 (b"f6", (EntryState::Added, 1337, 1337, -1)),
440 (b"f7", (EntryState::Merged, 1337, 1337, -1)),
441 (b"f7", (EntryState::Merged, 1337, 1337, -1)),
441 (b"f8", (EntryState::Merged, 1337, 1337, 1337)),
442 (b"f8", (EntryState::Merged, 1337, 1337, 1337)),
442 (b"f9", (EntryState::Merged, 1337, -2, 1337)),
443 (b"f9", (EntryState::Merged, 1337, -2, 1337)),
443 (b"fa", (EntryState::Added, 1337, -2, 1337)),
444 (b"fa", (EntryState::Added, 1337, -2, 1337)),
444 (b"fb", (EntryState::Removed, 1337, -2, 1337)),
445 (b"fb", (EntryState::Removed, 1337, -2, 1337)),
445 ]
446 ]
446 .iter()
447 .iter()
447 .map(|(fname, (state, mode, size, mtime))| {
448 .map(|(fname, (state, mode, size, mtime))| {
448 (
449 (
449 HgPathBuf::from_bytes(fname.as_ref()),
450 HgPathBuf::from_bytes(fname.as_ref()),
450 DirstateEntry {
451 DirstateEntry {
451 state: *state,
452 state: *state,
452 mode: *mode,
453 mode: *mode,
453 size: *size,
454 size: *size,
454 mtime: *mtime,
455 mtime: *mtime,
455 },
456 },
456 )
457 )
457 })
458 })
458 .collect();
459 .collect();
459
460
460 let mut non_normal = [
461 let mut non_normal = [
461 b"f1", b"f2", b"f5", b"f6", b"f7", b"f8", b"f9", b"fa", b"fb",
462 b"f1", b"f2", b"f5", b"f6", b"f7", b"f8", b"f9", b"fa", b"fb",
462 ]
463 ]
463 .iter()
464 .iter()
464 .map(|x| HgPathBuf::from_bytes(x.as_ref()))
465 .map(|x| HgPathBuf::from_bytes(x.as_ref()))
465 .collect();
466 .collect();
466
467
467 let mut other_parent = HashSet::new();
468 let mut other_parent = HashSet::new();
468 other_parent.insert(HgPathBuf::from_bytes(b"f4"));
469 other_parent.insert(HgPathBuf::from_bytes(b"f4"));
469 let entries = map.get_non_normal_other_parent_entries();
470 let entries = map.get_non_normal_other_parent_entries();
470
471
471 assert_eq!(
472 assert_eq!(
472 (&mut non_normal, &mut other_parent),
473 (&mut non_normal, &mut other_parent),
473 (entries.0, entries.1)
474 (entries.0, entries.1)
474 );
475 );
475 }
476 }
476 }
477 }
@@ -1,1204 +1,1205 b''
1 use bytes_cast::BytesCast;
1 use bytes_cast::BytesCast;
2 use micro_timer::timed;
2 use micro_timer::timed;
3 use std::borrow::Cow;
3 use std::borrow::Cow;
4 use std::convert::TryInto;
4 use std::convert::TryInto;
5 use std::path::PathBuf;
5 use std::path::PathBuf;
6
6
7 use super::on_disk;
7 use super::on_disk;
8 use super::on_disk::DirstateV2ParseError;
8 use super::on_disk::DirstateV2ParseError;
9 use super::path_with_basename::WithBasename;
9 use super::path_with_basename::WithBasename;
10 use crate::dirstate::parsers::pack_entry;
10 use crate::dirstate::parsers::pack_entry;
11 use crate::dirstate::parsers::packed_entry_size;
11 use crate::dirstate::parsers::packed_entry_size;
12 use crate::dirstate::parsers::parse_dirstate_entries;
12 use crate::dirstate::parsers::parse_dirstate_entries;
13 use crate::dirstate::parsers::Timestamp;
13 use crate::dirstate::parsers::Timestamp;
14 use crate::dirstate::MTIME_UNSET;
14 use crate::dirstate::MTIME_UNSET;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
16 use crate::dirstate::SIZE_NON_NORMAL;
16 use crate::dirstate::SIZE_NON_NORMAL;
17 use crate::dirstate::V1_RANGEMASK;
17 use crate::dirstate::V1_RANGEMASK;
18 use crate::matchers::Matcher;
18 use crate::matchers::Matcher;
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
20 use crate::CopyMapIter;
20 use crate::CopyMapIter;
21 use crate::DirstateEntry;
21 use crate::DirstateEntry;
22 use crate::DirstateError;
22 use crate::DirstateError;
23 use crate::DirstateParents;
23 use crate::DirstateParents;
24 use crate::DirstateStatus;
24 use crate::DirstateStatus;
25 use crate::EntryState;
25 use crate::EntryState;
26 use crate::FastHashMap;
26 use crate::FastHashMap;
27 use crate::PatternFileWarning;
27 use crate::PatternFileWarning;
28 use crate::StateMapIter;
28 use crate::StateMapIter;
29 use crate::StatusError;
29 use crate::StatusError;
30 use crate::StatusOptions;
30 use crate::StatusOptions;
31
31
32 pub struct DirstateMap<'on_disk> {
32 pub struct DirstateMap<'on_disk> {
33 /// Contents of the `.hg/dirstate` file
33 /// Contents of the `.hg/dirstate` file
34 pub(super) on_disk: &'on_disk [u8],
34 pub(super) on_disk: &'on_disk [u8],
35
35
36 pub(super) root: ChildNodes<'on_disk>,
36 pub(super) root: ChildNodes<'on_disk>,
37
37
38 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
38 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
39 pub(super) nodes_with_entry_count: u32,
39 pub(super) nodes_with_entry_count: u32,
40
40
41 /// Number of nodes anywhere in the tree that have
41 /// Number of nodes anywhere in the tree that have
42 /// `.copy_source.is_some()`.
42 /// `.copy_source.is_some()`.
43 pub(super) nodes_with_copy_source_count: u32,
43 pub(super) nodes_with_copy_source_count: u32,
44
44
45 /// See on_disk::Header
45 /// See on_disk::Header
46 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
46 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
47 }
47 }
48
48
49 /// Using a plain `HgPathBuf` of the full path from the repository root as a
49 /// Using a plain `HgPathBuf` of the full path from the repository root as a
50 /// map key would also work: all paths in a given map have the same parent
50 /// map key would also work: all paths in a given map have the same parent
51 /// path, so comparing full paths gives the same result as comparing base
51 /// path, so comparing full paths gives the same result as comparing base
52 /// names. However `HashMap` would waste time always re-hashing the same
52 /// names. However `HashMap` would waste time always re-hashing the same
53 /// string prefix.
53 /// string prefix.
54 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
54 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
55
55
56 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
56 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
57 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
57 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
58 pub(super) enum BorrowedPath<'tree, 'on_disk> {
58 pub(super) enum BorrowedPath<'tree, 'on_disk> {
59 InMemory(&'tree HgPathBuf),
59 InMemory(&'tree HgPathBuf),
60 OnDisk(&'on_disk HgPath),
60 OnDisk(&'on_disk HgPath),
61 }
61 }
62
62
63 pub(super) enum ChildNodes<'on_disk> {
63 pub(super) enum ChildNodes<'on_disk> {
64 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
64 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
65 OnDisk(&'on_disk [on_disk::Node]),
65 OnDisk(&'on_disk [on_disk::Node]),
66 }
66 }
67
67
68 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
68 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
69 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
69 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
70 OnDisk(&'on_disk [on_disk::Node]),
70 OnDisk(&'on_disk [on_disk::Node]),
71 }
71 }
72
72
73 pub(super) enum NodeRef<'tree, 'on_disk> {
73 pub(super) enum NodeRef<'tree, 'on_disk> {
74 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
74 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
75 OnDisk(&'on_disk on_disk::Node),
75 OnDisk(&'on_disk on_disk::Node),
76 }
76 }
77
77
78 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
78 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
79 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
79 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
80 match *self {
80 match *self {
81 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
81 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
82 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
82 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
83 }
83 }
84 }
84 }
85 }
85 }
86
86
87 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
87 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
88 type Target = HgPath;
88 type Target = HgPath;
89
89
90 fn deref(&self) -> &HgPath {
90 fn deref(&self) -> &HgPath {
91 match *self {
91 match *self {
92 BorrowedPath::InMemory(in_memory) => in_memory,
92 BorrowedPath::InMemory(in_memory) => in_memory,
93 BorrowedPath::OnDisk(on_disk) => on_disk,
93 BorrowedPath::OnDisk(on_disk) => on_disk,
94 }
94 }
95 }
95 }
96 }
96 }
97
97
98 impl Default for ChildNodes<'_> {
98 impl Default for ChildNodes<'_> {
99 fn default() -> Self {
99 fn default() -> Self {
100 ChildNodes::InMemory(Default::default())
100 ChildNodes::InMemory(Default::default())
101 }
101 }
102 }
102 }
103
103
104 impl<'on_disk> ChildNodes<'on_disk> {
104 impl<'on_disk> ChildNodes<'on_disk> {
105 pub(super) fn as_ref<'tree>(
105 pub(super) fn as_ref<'tree>(
106 &'tree self,
106 &'tree self,
107 ) -> ChildNodesRef<'tree, 'on_disk> {
107 ) -> ChildNodesRef<'tree, 'on_disk> {
108 match self {
108 match self {
109 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
109 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
110 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
110 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
111 }
111 }
112 }
112 }
113
113
114 pub(super) fn is_empty(&self) -> bool {
114 pub(super) fn is_empty(&self) -> bool {
115 match self {
115 match self {
116 ChildNodes::InMemory(nodes) => nodes.is_empty(),
116 ChildNodes::InMemory(nodes) => nodes.is_empty(),
117 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
117 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
118 }
118 }
119 }
119 }
120
120
121 pub(super) fn make_mut(
121 pub(super) fn make_mut(
122 &mut self,
122 &mut self,
123 on_disk: &'on_disk [u8],
123 on_disk: &'on_disk [u8],
124 ) -> Result<
124 ) -> Result<
125 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
125 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
126 DirstateV2ParseError,
126 DirstateV2ParseError,
127 > {
127 > {
128 match self {
128 match self {
129 ChildNodes::InMemory(nodes) => Ok(nodes),
129 ChildNodes::InMemory(nodes) => Ok(nodes),
130 ChildNodes::OnDisk(nodes) => {
130 ChildNodes::OnDisk(nodes) => {
131 let nodes = nodes
131 let nodes = nodes
132 .iter()
132 .iter()
133 .map(|node| {
133 .map(|node| {
134 Ok((
134 Ok((
135 node.path(on_disk)?,
135 node.path(on_disk)?,
136 node.to_in_memory_node(on_disk)?,
136 node.to_in_memory_node(on_disk)?,
137 ))
137 ))
138 })
138 })
139 .collect::<Result<_, _>>()?;
139 .collect::<Result<_, _>>()?;
140 *self = ChildNodes::InMemory(nodes);
140 *self = ChildNodes::InMemory(nodes);
141 match self {
141 match self {
142 ChildNodes::InMemory(nodes) => Ok(nodes),
142 ChildNodes::InMemory(nodes) => Ok(nodes),
143 ChildNodes::OnDisk(_) => unreachable!(),
143 ChildNodes::OnDisk(_) => unreachable!(),
144 }
144 }
145 }
145 }
146 }
146 }
147 }
147 }
148 }
148 }
149
149
150 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
150 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
151 pub(super) fn get(
151 pub(super) fn get(
152 &self,
152 &self,
153 base_name: &HgPath,
153 base_name: &HgPath,
154 on_disk: &'on_disk [u8],
154 on_disk: &'on_disk [u8],
155 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
155 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
156 match self {
156 match self {
157 ChildNodesRef::InMemory(nodes) => Ok(nodes
157 ChildNodesRef::InMemory(nodes) => Ok(nodes
158 .get_key_value(base_name)
158 .get_key_value(base_name)
159 .map(|(k, v)| NodeRef::InMemory(k, v))),
159 .map(|(k, v)| NodeRef::InMemory(k, v))),
160 ChildNodesRef::OnDisk(nodes) => {
160 ChildNodesRef::OnDisk(nodes) => {
161 let mut parse_result = Ok(());
161 let mut parse_result = Ok(());
162 let search_result = nodes.binary_search_by(|node| {
162 let search_result = nodes.binary_search_by(|node| {
163 match node.base_name(on_disk) {
163 match node.base_name(on_disk) {
164 Ok(node_base_name) => node_base_name.cmp(base_name),
164 Ok(node_base_name) => node_base_name.cmp(base_name),
165 Err(e) => {
165 Err(e) => {
166 parse_result = Err(e);
166 parse_result = Err(e);
167 // Dummy comparison result, `search_result` won’t
167 // Dummy comparison result, `search_result` won’t
168 // be used since `parse_result` is an error
168 // be used since `parse_result` is an error
169 std::cmp::Ordering::Equal
169 std::cmp::Ordering::Equal
170 }
170 }
171 }
171 }
172 });
172 });
173 parse_result.map(|()| {
173 parse_result.map(|()| {
174 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
174 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
175 })
175 })
176 }
176 }
177 }
177 }
178 }
178 }
179
179
180 /// Iterate in undefined order
180 /// Iterate in undefined order
181 pub(super) fn iter(
181 pub(super) fn iter(
182 &self,
182 &self,
183 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
183 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
184 match self {
184 match self {
185 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
185 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
186 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
186 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
187 ),
187 ),
188 ChildNodesRef::OnDisk(nodes) => {
188 ChildNodesRef::OnDisk(nodes) => {
189 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
189 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
190 }
190 }
191 }
191 }
192 }
192 }
193
193
194 /// Iterate in parallel in undefined order
194 /// Iterate in parallel in undefined order
195 pub(super) fn par_iter(
195 pub(super) fn par_iter(
196 &self,
196 &self,
197 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
197 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
198 {
198 {
199 use rayon::prelude::*;
199 use rayon::prelude::*;
200 match self {
200 match self {
201 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
201 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
202 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
202 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
203 ),
203 ),
204 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
204 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
205 nodes.par_iter().map(NodeRef::OnDisk),
205 nodes.par_iter().map(NodeRef::OnDisk),
206 ),
206 ),
207 }
207 }
208 }
208 }
209
209
210 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
210 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
211 match self {
211 match self {
212 ChildNodesRef::InMemory(nodes) => {
212 ChildNodesRef::InMemory(nodes) => {
213 let mut vec: Vec<_> = nodes
213 let mut vec: Vec<_> = nodes
214 .iter()
214 .iter()
215 .map(|(k, v)| NodeRef::InMemory(k, v))
215 .map(|(k, v)| NodeRef::InMemory(k, v))
216 .collect();
216 .collect();
217 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
217 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
218 match node {
218 match node {
219 NodeRef::InMemory(path, _node) => path.base_name(),
219 NodeRef::InMemory(path, _node) => path.base_name(),
220 NodeRef::OnDisk(_) => unreachable!(),
220 NodeRef::OnDisk(_) => unreachable!(),
221 }
221 }
222 }
222 }
223 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
223 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
224 // value: https://github.com/rust-lang/rust/issues/34162
224 // value: https://github.com/rust-lang/rust/issues/34162
225 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
225 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
226 vec
226 vec
227 }
227 }
228 ChildNodesRef::OnDisk(nodes) => {
228 ChildNodesRef::OnDisk(nodes) => {
229 // Nodes on disk are already sorted
229 // Nodes on disk are already sorted
230 nodes.iter().map(NodeRef::OnDisk).collect()
230 nodes.iter().map(NodeRef::OnDisk).collect()
231 }
231 }
232 }
232 }
233 }
233 }
234 }
234 }
235
235
impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
    /// Full path of this node from the root of the tree, dispatching to
    /// either the in-memory path or the lazily-parsed on-disk bytes.
    pub(super) fn full_path(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<&'tree HgPath, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(path, _node) => Ok(path.full_path()),
            NodeRef::OnDisk(node) => node.full_path(on_disk),
        }
    }

    /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
    /// HgPath>` detached from `'tree`
    pub(super) fn full_path_borrowed(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(path, _node) => match path.full_path() {
                Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
                Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
            },
            NodeRef::OnDisk(node) => {
                Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
            }
        }
    }

    /// Last component of this node’s path.
    pub(super) fn base_name(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<&'tree HgPath, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(path, _node) => Ok(path.base_name()),
            NodeRef::OnDisk(node) => node.base_name(on_disk),
        }
    }

    /// Child nodes of this node. For the on-disk variant this parses the
    /// children out of `on_disk`, which may fail.
    pub(super) fn children(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
            NodeRef::OnDisk(node) => {
                Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
            }
        }
    }

    /// Whether this node records a copy source.
    pub(super) fn has_copy_source(&self) -> bool {
        match self {
            NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
            NodeRef::OnDisk(node) => node.has_copy_source(),
        }
    }

    /// The path this node was copied from, if any.
    pub(super) fn copy_source(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(_path, node) => {
                Ok(node.copy_source.as_ref().map(|s| &**s))
            }
            NodeRef::OnDisk(node) => node.copy_source(on_disk),
        }
    }

    /// A copy of this node’s dirstate entry, if it has one.
    pub(super) fn entry(
        &self,
    ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(_path, node) => {
                Ok(node.data.as_entry().copied())
            }
            NodeRef::OnDisk(node) => node.entry(),
        }
    }

    /// The state of this node’s dirstate entry, if it has one.
    pub(super) fn state(
        &self,
    ) -> Result<Option<EntryState>, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(_path, node) => {
                Ok(node.data.as_entry().map(|entry| entry.state))
            }
            NodeRef::OnDisk(node) => node.state(),
        }
    }

    /// The cached mtime for this node, when it represents a directory
    /// whose data is `NodeData::CachedDirectory`.
    pub(super) fn cached_directory_mtime(
        &self,
    ) -> Option<&'tree on_disk::Timestamp> {
        match self {
            NodeRef::InMemory(_path, node) => match &node.data {
                NodeData::CachedDirectory { mtime } => Some(mtime),
                _ => None,
            },
            NodeRef::OnDisk(node) => node.cached_directory_mtime(),
        }
    }

    /// How many (non-inclusive) descendants of this node have an entry.
    pub(super) fn descendants_with_entry_count(&self) -> u32 {
        match self {
            NodeRef::InMemory(_path, node) => {
                node.descendants_with_entry_count
            }
            NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
        }
    }

    /// How many (non-inclusive) descendants of this node have a "tracked"
    /// entry.
    pub(super) fn tracked_descendants_count(&self) -> u32 {
        match self {
            NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
            NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
        }
    }
}
355
355
/// Represents a file or a directory
#[derive(Default)]
pub(super) struct Node<'on_disk> {
    /// Dirstate entry, cached directory mtime, or nothing — see `NodeData`.
    pub(super) data: NodeData,

    /// Copy source recorded for this node, if any.
    pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,

    /// Child nodes, keyed by path component.
    pub(super) children: ChildNodes<'on_disk>,

    /// How many (non-inclusive) descendants of this node have an entry.
    pub(super) descendants_with_entry_count: u32,

    /// How many (non-inclusive) descendants of this node have an entry whose
    /// state is "tracked".
    pub(super) tracked_descendants_count: u32,
}
372
372
/// Payload of a tree `Node`: an actual dirstate entry, a cached directory
/// mtime, or nothing (the default for nodes created only as intermediate
/// path components of some descendant).
pub(super) enum NodeData {
    Entry(DirstateEntry),
    CachedDirectory { mtime: on_disk::Timestamp },
    None,
}
378
378
379 impl Default for NodeData {
379 impl Default for NodeData {
380 fn default() -> Self {
380 fn default() -> Self {
381 NodeData::None
381 NodeData::None
382 }
382 }
383 }
383 }
384
384
385 impl NodeData {
385 impl NodeData {
386 fn has_entry(&self) -> bool {
386 fn has_entry(&self) -> bool {
387 match self {
387 match self {
388 NodeData::Entry(_) => true,
388 NodeData::Entry(_) => true,
389 _ => false,
389 _ => false,
390 }
390 }
391 }
391 }
392
392
393 fn as_entry(&self) -> Option<&DirstateEntry> {
393 fn as_entry(&self) -> Option<&DirstateEntry> {
394 match self {
394 match self {
395 NodeData::Entry(entry) => Some(entry),
395 NodeData::Entry(entry) => Some(entry),
396 _ => None,
396 _ => None,
397 }
397 }
398 }
398 }
399 }
399 }
400
400
impl<'on_disk> DirstateMap<'on_disk> {
    /// Create a map with no nodes, keeping a reference to the given
    /// on-disk bytes.
    pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
        Self {
            on_disk,
            root: ChildNodes::default(),
            nodes_with_entry_count: 0,
            nodes_with_copy_source_count: 0,
            ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
        }
    }

    /// Build a map from a dirstate-v2 file. Parsing of nodes is lazy; see
    /// `on_disk::read`.
    #[timed]
    pub fn new_v2(
        on_disk: &'on_disk [u8],
    ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
        Ok(on_disk::read(on_disk)?)
    }

    /// Build a map from a dirstate-v1 file, eagerly inserting one node per
    /// entry (plus ancestor nodes) and maintaining the per-ancestor
    /// entry/tracked counters.
    #[timed]
    pub fn new_v1(
        on_disk: &'on_disk [u8],
    ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
        let mut map = Self::empty(on_disk);
        if map.on_disk.is_empty() {
            return Ok((map, None));
        }

        let parents = parse_dirstate_entries(
            map.on_disk,
            |path, entry, copy_source| {
                let tracked = entry.state.is_tracked();
                let node = Self::get_or_insert_node(
                    map.on_disk,
                    &mut map.root,
                    path,
                    WithBasename::to_cow_borrowed,
                    |ancestor| {
                        if tracked {
                            ancestor.tracked_descendants_count += 1
                        }
                        ancestor.descendants_with_entry_count += 1
                    },
                )?;
                // A valid v1 file mentions each path at most once.
                assert!(
                    !node.data.has_entry(),
                    "duplicate dirstate entry in read"
                );
                assert!(
                    node.copy_source.is_none(),
                    "duplicate dirstate entry in read"
                );
                node.data = NodeData::Entry(*entry);
                node.copy_source = copy_source.map(Cow::Borrowed);
                map.nodes_with_entry_count += 1;
                if copy_source.is_some() {
                    map.nodes_with_copy_source_count += 1
                }
                Ok(())
            },
        )?;
        let parents = Some(parents.clone());

        Ok((map, parents))
    }

    /// Walk the tree one path component at a time; returns `None` as soon
    /// as a component has no matching child node.
    fn get_node<'tree>(
        &'tree self,
        path: &HgPath,
    ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
        let mut children = self.root.as_ref();
        let mut components = path.components();
        let mut component =
            components.next().expect("expected at least one components");
        loop {
            if let Some(child) = children.get(component, self.on_disk)? {
                if let Some(next_component) = components.next() {
                    component = next_component;
                    children = child.children(self.on_disk)?;
                } else {
                    return Ok(Some(child));
                }
            } else {
                return Ok(None);
            }
        }
    }

    /// Returns a mutable reference to the node at `path` if it exists
    ///
    /// This takes `root` instead of `&mut self` so that callers can mutate
    /// other fields while the returned borrow is still valid
    fn get_node_mut<'tree>(
        on_disk: &'on_disk [u8],
        root: &'tree mut ChildNodes<'on_disk>,
        path: &HgPath,
    ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
        let mut children = root;
        let mut components = path.components();
        let mut component =
            components.next().expect("expected at least one components");
        loop {
            if let Some(child) = children.make_mut(on_disk)?.get_mut(component)
            {
                if let Some(next_component) = components.next() {
                    component = next_component;
                    children = &mut child.children;
                } else {
                    return Ok(Some(child));
                }
            } else {
                return Ok(None);
            }
        }
    }

    /// Fetch the node at `path`, creating it (and any missing ancestors)
    /// if needed. Newly-created nodes are `Default`, i.e. without entry.
    pub(super) fn get_or_insert<'tree, 'path>(
        &'tree mut self,
        path: &HgPath,
    ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
        Self::get_or_insert_node(
            self.on_disk,
            &mut self.root,
            path,
            WithBasename::to_cow_owned,
            |_| {},
        )
    }

    /// Like `get_or_insert`, but taking `root` instead of `&mut self` (so
    /// callers can mutate other fields) and calling `each_ancestor` on
    /// every strict ancestor node of `path` (not on the returned node).
    pub(super) fn get_or_insert_node<'tree, 'path>(
        on_disk: &'on_disk [u8],
        root: &'tree mut ChildNodes<'on_disk>,
        path: &'path HgPath,
        to_cow: impl Fn(
            WithBasename<&'path HgPath>,
        ) -> WithBasename<Cow<'on_disk, HgPath>>,
        mut each_ancestor: impl FnMut(&mut Node),
    ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
        let mut child_nodes = root;
        let mut inclusive_ancestor_paths =
            WithBasename::inclusive_ancestors_of(path);
        let mut ancestor_path = inclusive_ancestor_paths
            .next()
            .expect("expected at least one inclusive ancestor");
        loop {
            // TODO: can we avoid allocating an owned key in cases where the
            // map already contains that key, without introducing double
            // lookup?
            let child_node = child_nodes
                .make_mut(on_disk)?
                .entry(to_cow(ancestor_path))
                .or_default();
            if let Some(next) = inclusive_ancestor_paths.next() {
                each_ancestor(child_node);
                ancestor_path = next;
                child_nodes = &mut child_node.children;
            } else {
                return Ok(child_node);
            }
        }
    }

    /// Store `new_entry` at `path` (creating nodes as needed), keeping the
    /// map-level and per-ancestor counters consistent with the transition
    /// from `old_state`.
    fn add_or_remove_file(
        &mut self,
        path: &HgPath,
        old_state: EntryState,
        new_entry: DirstateEntry,
    ) -> Result<(), DirstateV2ParseError> {
        let had_entry = old_state != EntryState::Unknown;
        let tracked_count_increment =
            match (old_state.is_tracked(), new_entry.state.is_tracked()) {
                (false, true) => 1,
                (true, false) => -1,
                _ => 0,
            };

        let node = Self::get_or_insert_node(
            self.on_disk,
            &mut self.root,
            path,
            WithBasename::to_cow_owned,
            |ancestor| {
                if !had_entry {
                    ancestor.descendants_with_entry_count += 1;
                }

                // We can’t use `+= increment` because the counter is unsigned,
                // and we want debug builds to detect accidental underflow
                // through zero
                match tracked_count_increment {
                    1 => ancestor.tracked_descendants_count += 1,
                    -1 => ancestor.tracked_descendants_count -= 1,
                    _ => {}
                }
            },
        )?;
        if !had_entry {
            self.nodes_with_entry_count += 1
        }
        node.data = NodeData::Entry(new_entry);
        Ok(())
    }

    /// Iterate over all nodes of the tree; each node is yielded after all
    /// of its children.
    fn iter_nodes<'tree>(
        &'tree self,
    ) -> impl Iterator<
        Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
    > + 'tree {
        // Depth first tree traversal.
        //
        // If we could afford internal iteration and recursion,
        // this would look like:
        //
        // ```
        // fn traverse_children(
        //     children: &ChildNodes,
        //     each: &mut impl FnMut(&Node),
        // ) {
        //     for child in children.values() {
        //         traverse_children(&child.children, each);
        //         each(child);
        //     }
        // }
        // ```
        //
        // However we want an external iterator and therefore can’t use the
        // call stack. Use an explicit stack instead:
        let mut stack = Vec::new();
        let mut iter = self.root.as_ref().iter();
        std::iter::from_fn(move || {
            while let Some(child_node) = iter.next() {
                let children = match child_node.children(self.on_disk) {
                    Ok(children) => children,
                    Err(error) => return Some(Err(error)),
                };
                // Pseudo-recursion
                let new_iter = children.iter();
                let old_iter = std::mem::replace(&mut iter, new_iter);
                stack.push((child_node, old_iter));
            }
            // Found the end of a `children.iter()` iterator.
            if let Some((child_node, next_iter)) = stack.pop() {
                // "Return" from pseudo-recursion by restoring state from the
                // explicit stack
                iter = next_iter;

                Some(Ok(child_node))
            } else {
                // Reached the bottom of the stack, we’re done
                None
            }
        })
    }

    /// Clear the recorded mtime of every given path that has an entry.
    fn clear_known_ambiguous_mtimes(
        &mut self,
        paths: &[impl AsRef<HgPath>],
    ) -> Result<(), DirstateV2ParseError> {
        for path in paths {
            if let Some(node) = Self::get_node_mut(
                self.on_disk,
                &mut self.root,
                path.as_ref(),
            )? {
                if let NodeData::Entry(entry) = &mut node.data {
                    entry.clear_mtime();
                }
            }
        }
        Ok(())
    }

    /// Return a fallible iterator of full paths of nodes that have an
    /// `entry` for which the given `predicate` returns true.
    ///
    /// Fallibility means that each iterator item is a `Result`, which may
    /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
    /// should only happen if Mercurial is buggy or a repository is corrupted.
    fn filter_full_paths<'tree>(
        &'tree self,
        predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
    ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
    {
        filter_map_results(self.iter_nodes(), move |node| {
            if let Some(entry) = node.entry()? {
                if predicate(&entry) {
                    return Ok(Some(node.full_path(self.on_disk)?));
                }
            }
            Ok(None)
        })
    }
}
693
693
/// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
///
/// The callback is only called for incoming `Ok` values. Errors are passed
/// through as-is. In order to let it use the `?` operator the callback is
/// expected to return a `Result` of `Option`, instead of an `Option` of
/// `Result`.
fn filter_map_results<'a, I, F, A, B, E>(
    iter: I,
    f: F,
) -> impl Iterator<Item = Result<B, E>> + 'a
where
    I: Iterator<Item = Result<A, E>> + 'a,
    F: Fn(A) -> Result<Option<B>, E> + 'a,
{
    // `and_then` short-circuits incoming errors past the callback, and
    // `transpose` turns `Ok(None)` into a skipped item:
    //   Err(e)         -> Some(Err(e))
    //   Ok(a), f -> Err(e)      -> Some(Err(e))
    //   Ok(a), f -> Ok(None)    -> None (filtered out)
    //   Ok(a), f -> Ok(Some(b)) -> Some(Ok(b))
    iter.filter_map(move |item| item.and_then(|value| f(value)).transpose())
}
713
713
714 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
714 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
715 fn clear(&mut self) {
715 fn clear(&mut self) {
716 self.root = Default::default();
716 self.root = Default::default();
717 self.nodes_with_entry_count = 0;
717 self.nodes_with_entry_count = 0;
718 self.nodes_with_copy_source_count = 0;
718 self.nodes_with_copy_source_count = 0;
719 }
719 }
720
720
    /// Insert (or update) the entry for `filename`, first normalizing its
    /// state/size/mtime according to the mutually-exclusive situation flags.
    fn add_file(
        &mut self,
        filename: &HgPath,
        entry: DirstateEntry,
        added: bool,
        merged: bool,
        from_p2: bool,
        possibly_dirty: bool,
    ) -> Result<(), DirstateError> {
        let mut entry = entry;
        if added {
            assert!(!possibly_dirty);
            assert!(!from_p2);
            entry.state = EntryState::Added;
            entry.size = SIZE_NON_NORMAL;
            entry.mtime = MTIME_UNSET;
        } else if merged {
            assert!(!possibly_dirty);
            assert!(!from_p2);
            entry.state = EntryState::Merged;
            entry.size = SIZE_FROM_OTHER_PARENT;
            entry.mtime = MTIME_UNSET;
        } else if from_p2 {
            assert!(!possibly_dirty);
            // NOTE(review): unlike the `added`/`merged` branches, the state
            // is not forced here; presumably the caller already provides the
            // 'n' (Normal) state for from_p2 entries — confirm against the
            // Python-side callers.
            entry.size = SIZE_FROM_OTHER_PARENT;
            entry.mtime = MTIME_UNSET;
        } else if possibly_dirty {
            entry.state = EntryState::Normal;
            entry.size = SIZE_NON_NORMAL;
            entry.mtime = MTIME_UNSET;
        } else {
            // Mask size/mtime into the value range representable by the
            // dirstate-v1 format (see `V1_RANGEMASK`).
            entry.size = entry.size & V1_RANGEMASK;
            entry.mtime = entry.mtime & V1_RANGEMASK;
        }

        // `Unknown` stands for "no previous entry" when updating counters.
        let old_state = match self.get(filename)? {
            Some(e) => e.state,
            None => EntryState::Unknown,
        };

        Ok(self.add_or_remove_file(filename, old_state, entry)?)
    }
763
764
    /// Mark `filename` as removed, encoding in the `size` field which kind
    /// of entry it previously was (merged / from other parent) so that the
    /// information can be restored later.
    fn remove_file(
        &mut self,
        filename: &HgPath,
        in_merge: bool,
    ) -> Result<(), DirstateError> {
        let old_entry_opt = self.get(filename)?;
        let old_state = match old_entry_opt {
            Some(e) => e.state,
            None => EntryState::Unknown,
        };
        let mut size = 0;
        if in_merge {
            // XXX we should not be able to have 'm' state and 'FROM_P2' if not
            // during a merge. So I (marmoute) am not sure we need the
            // conditional at all. Adding double checking this with assert
            // would be nice.
            if let Some(old_entry) = old_entry_opt {
                // backup the previous state
                if old_entry.state == EntryState::Merged {
                    size = SIZE_NON_NORMAL;
                } else if old_entry.state == EntryState::Normal
                    && old_entry.size == SIZE_FROM_OTHER_PARENT
                {
                    // other parent
                    size = SIZE_FROM_OTHER_PARENT;
                }
            }
        }
        // A plain removal (no merge info to preserve) also drops any
        // recorded copy source.
        if size == 0 {
            self.copy_map_remove(filename)?;
        }
        let entry = DirstateEntry {
            state: EntryState::Removed,
            mode: 0,
            size,
            mtime: 0,
        };
        Ok(self.add_or_remove_file(filename, old_state, entry)?)
    }
803
804
804 fn drop_file(
805 fn drop_file(
805 &mut self,
806 &mut self,
806 filename: &HgPath,
807 filename: &HgPath,
807 old_state: EntryState,
808 old_state: EntryState,
808 ) -> Result<bool, DirstateError> {
809 ) -> Result<bool, DirstateError> {
809 struct Dropped {
810 struct Dropped {
810 was_tracked: bool,
811 was_tracked: bool,
811 had_entry: bool,
812 had_entry: bool,
812 had_copy_source: bool,
813 had_copy_source: bool,
813 }
814 }
814
815
815 /// If this returns `Ok(Some((dropped, removed)))`, then
816 /// If this returns `Ok(Some((dropped, removed)))`, then
816 ///
817 ///
817 /// * `dropped` is about the leaf node that was at `filename`
818 /// * `dropped` is about the leaf node that was at `filename`
818 /// * `removed` is whether this particular level of recursion just
819 /// * `removed` is whether this particular level of recursion just
819 /// removed a node in `nodes`.
820 /// removed a node in `nodes`.
820 fn recur<'on_disk>(
821 fn recur<'on_disk>(
821 on_disk: &'on_disk [u8],
822 on_disk: &'on_disk [u8],
822 nodes: &mut ChildNodes<'on_disk>,
823 nodes: &mut ChildNodes<'on_disk>,
823 path: &HgPath,
824 path: &HgPath,
824 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
825 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
825 let (first_path_component, rest_of_path) =
826 let (first_path_component, rest_of_path) =
826 path.split_first_component();
827 path.split_first_component();
827 let node = if let Some(node) =
828 let node = if let Some(node) =
828 nodes.make_mut(on_disk)?.get_mut(first_path_component)
829 nodes.make_mut(on_disk)?.get_mut(first_path_component)
829 {
830 {
830 node
831 node
831 } else {
832 } else {
832 return Ok(None);
833 return Ok(None);
833 };
834 };
834 let dropped;
835 let dropped;
835 if let Some(rest) = rest_of_path {
836 if let Some(rest) = rest_of_path {
836 if let Some((d, removed)) =
837 if let Some((d, removed)) =
837 recur(on_disk, &mut node.children, rest)?
838 recur(on_disk, &mut node.children, rest)?
838 {
839 {
839 dropped = d;
840 dropped = d;
840 if dropped.had_entry {
841 if dropped.had_entry {
841 node.descendants_with_entry_count -= 1;
842 node.descendants_with_entry_count -= 1;
842 }
843 }
843 if dropped.was_tracked {
844 if dropped.was_tracked {
844 node.tracked_descendants_count -= 1;
845 node.tracked_descendants_count -= 1;
845 }
846 }
846
847
847 // Directory caches must be invalidated when removing a
848 // Directory caches must be invalidated when removing a
848 // child node
849 // child node
849 if removed {
850 if removed {
850 if let NodeData::CachedDirectory { .. } = &node.data {
851 if let NodeData::CachedDirectory { .. } = &node.data {
851 node.data = NodeData::None
852 node.data = NodeData::None
852 }
853 }
853 }
854 }
854 } else {
855 } else {
855 return Ok(None);
856 return Ok(None);
856 }
857 }
857 } else {
858 } else {
858 let had_entry = node.data.has_entry();
859 let had_entry = node.data.has_entry();
859 if had_entry {
860 if had_entry {
860 node.data = NodeData::None
861 node.data = NodeData::None
861 }
862 }
862 dropped = Dropped {
863 dropped = Dropped {
863 was_tracked: node
864 was_tracked: node
864 .data
865 .data
865 .as_entry()
866 .as_entry()
866 .map_or(false, |entry| entry.state.is_tracked()),
867 .map_or(false, |entry| entry.state.is_tracked()),
867 had_entry,
868 had_entry,
868 had_copy_source: node.copy_source.take().is_some(),
869 had_copy_source: node.copy_source.take().is_some(),
869 };
870 };
870 }
871 }
871 // After recursion, for both leaf (rest_of_path is None) nodes and
872 // After recursion, for both leaf (rest_of_path is None) nodes and
872 // parent nodes, remove a node if it just became empty.
873 // parent nodes, remove a node if it just became empty.
873 let remove = !node.data.has_entry()
874 let remove = !node.data.has_entry()
874 && node.copy_source.is_none()
875 && node.copy_source.is_none()
875 && node.children.is_empty();
876 && node.children.is_empty();
876 if remove {
877 if remove {
877 nodes.make_mut(on_disk)?.remove(first_path_component);
878 nodes.make_mut(on_disk)?.remove(first_path_component);
878 }
879 }
879 Ok(Some((dropped, remove)))
880 Ok(Some((dropped, remove)))
880 }
881 }
881
882
882 if let Some((dropped, _removed)) =
883 if let Some((dropped, _removed)) =
883 recur(self.on_disk, &mut self.root, filename)?
884 recur(self.on_disk, &mut self.root, filename)?
884 {
885 {
885 if dropped.had_entry {
886 if dropped.had_entry {
886 self.nodes_with_entry_count -= 1
887 self.nodes_with_entry_count -= 1
887 }
888 }
888 if dropped.had_copy_source {
889 if dropped.had_copy_source {
889 self.nodes_with_copy_source_count -= 1
890 self.nodes_with_copy_source_count -= 1
890 }
891 }
891 Ok(dropped.had_entry)
892 Ok(dropped.had_entry)
892 } else {
893 } else {
893 debug_assert!(!old_state.is_tracked());
894 debug_assert!(!old_state.is_tracked());
894 Ok(false)
895 Ok(false)
895 }
896 }
896 }
897 }
897
898
898 fn clear_ambiguous_times(
899 fn clear_ambiguous_times(
899 &mut self,
900 &mut self,
900 filenames: Vec<HgPathBuf>,
901 filenames: Vec<HgPathBuf>,
901 now: i32,
902 now: i32,
902 ) -> Result<(), DirstateV2ParseError> {
903 ) -> Result<(), DirstateV2ParseError> {
903 for filename in filenames {
904 for filename in filenames {
904 if let Some(node) =
905 if let Some(node) =
905 Self::get_node_mut(self.on_disk, &mut self.root, &filename)?
906 Self::get_node_mut(self.on_disk, &mut self.root, &filename)?
906 {
907 {
907 if let NodeData::Entry(entry) = &mut node.data {
908 if let NodeData::Entry(entry) = &mut node.data {
908 entry.clear_ambiguous_mtime(now);
909 entry.clear_ambiguous_mtime(now);
909 }
910 }
910 }
911 }
911 }
912 }
912 Ok(())
913 Ok(())
913 }
914 }
914
915
915 fn non_normal_entries_contains(
916 fn non_normal_entries_contains(
916 &mut self,
917 &mut self,
917 key: &HgPath,
918 key: &HgPath,
918 ) -> Result<bool, DirstateV2ParseError> {
919 ) -> Result<bool, DirstateV2ParseError> {
919 Ok(if let Some(node) = self.get_node(key)? {
920 Ok(if let Some(node) = self.get_node(key)? {
920 node.entry()?.map_or(false, |entry| entry.is_non_normal())
921 node.entry()?.map_or(false, |entry| entry.is_non_normal())
921 } else {
922 } else {
922 false
923 false
923 })
924 })
924 }
925 }
925
926
    /// Remove `key` from the cached "non normal" set.
    ///
    /// Intentionally a no-op here: this tree-based `DirstateMap` derives the
    /// set on the fly instead of caching it.
    fn non_normal_entries_remove(&mut self, _key: &HgPath) {
        // Do nothing, this `DirstateMap` does not have a separate "non normal
        // entries" set that need to be kept up to date
    }
930
931
    /// Iterate over the paths of entries that are either "non normal" or
    /// recorded as coming from the other merge parent.
    fn non_normal_or_other_parent_paths(
        &mut self,
    ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
    {
        Box::new(self.filter_full_paths(|entry| {
            entry.is_non_normal() || entry.is_from_other_parent()
        }))
    }
939
940
    /// Recompute the cached "non normal" / "other parent" sets.
    ///
    /// Intentionally a no-op here: this tree-based `DirstateMap` derives
    /// both sets on the fly instead of caching them.
    fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
        // Do nothing, this `DirstateMap` does not have a separate "non normal
        // entries" and "from other parent" sets that need to be recomputed
    }
944
945
    /// Iterate over the paths of "non normal" entries.
    ///
    /// `&mut self` is only what the trait signature requires; this simply
    /// delegates to the shared-borrow variant below.
    fn iter_non_normal_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        self.iter_non_normal_paths_panic()
    }
952
953
    /// Shared-borrow variant of `iter_non_normal_paths`.
    ///
    /// NOTE(review): nothing in this body panics; the `_panic` suffix
    /// presumably mirrors the flat-map implementation of the same trait
    /// method — confirm against the trait definition.
    fn iter_non_normal_paths_panic(
        &self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
    }
960
961
    /// Iterate over the paths of entries recorded as coming from the other
    /// merge parent.
    fn iter_other_parent_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
    }
968
969
969 fn has_tracked_dir(
970 fn has_tracked_dir(
970 &mut self,
971 &mut self,
971 directory: &HgPath,
972 directory: &HgPath,
972 ) -> Result<bool, DirstateError> {
973 ) -> Result<bool, DirstateError> {
973 if let Some(node) = self.get_node(directory)? {
974 if let Some(node) = self.get_node(directory)? {
974 // A node without a `DirstateEntry` was created to hold child
975 // A node without a `DirstateEntry` was created to hold child
975 // nodes, and is therefore a directory.
976 // nodes, and is therefore a directory.
976 let state = node.state()?;
977 let state = node.state()?;
977 Ok(state.is_none() && node.tracked_descendants_count() > 0)
978 Ok(state.is_none() && node.tracked_descendants_count() > 0)
978 } else {
979 } else {
979 Ok(false)
980 Ok(false)
980 }
981 }
981 }
982 }
982
983
983 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
984 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
984 if let Some(node) = self.get_node(directory)? {
985 if let Some(node) = self.get_node(directory)? {
985 // A node without a `DirstateEntry` was created to hold child
986 // A node without a `DirstateEntry` was created to hold child
986 // nodes, and is therefore a directory.
987 // nodes, and is therefore a directory.
987 let state = node.state()?;
988 let state = node.state()?;
988 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
989 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
989 } else {
990 } else {
990 Ok(false)
991 Ok(false)
991 }
992 }
992 }
993 }
993
994
    /// Serialize the dirstate to the v1 on-disk format.
    ///
    /// Entries whose mtime is ambiguous relative to `now` have their mtime
    /// cleared first (via `clear_known_ambiguous_mtimes`) before packing.
    #[timed]
    fn pack_v1(
        &mut self,
        parents: DirstateParents,
        now: Timestamp,
    ) -> Result<Vec<u8>, DirstateError> {
        let now: i32 = now.0.try_into().expect("time overflow");
        let mut ambiguous_mtimes = Vec::new();
        // Optimization (to be measured?): pre-compute size to avoid `Vec`
        // reallocations
        let mut size = parents.as_bytes().len();
        // First pass: compute the total packed size and collect paths whose
        // mtime is ambiguous.
        for node in self.iter_nodes() {
            let node = node?;
            if let Some(entry) = node.entry()? {
                size += packed_entry_size(
                    node.full_path(self.on_disk)?,
                    node.copy_source(self.on_disk)?,
                );
                if entry.mtime_is_ambiguous(now) {
                    ambiguous_mtimes.push(
                        node.full_path_borrowed(self.on_disk)?
                            .detach_from_tree(),
                    )
                }
            }
        }
        self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;

        let mut packed = Vec::with_capacity(size);
        packed.extend(parents.as_bytes());

        // Second pass: write each entry after the parents header.
        for node in self.iter_nodes() {
            let node = node?;
            if let Some(entry) = node.entry()? {
                pack_entry(
                    node.full_path(self.on_disk)?,
                    &entry,
                    node.copy_source(self.on_disk)?,
                    &mut packed,
                );
            }
        }
        Ok(packed)
    }
1038
1039
    /// Serialize the dirstate to the v2 on-disk format.
    ///
    /// As in `pack_v1`, ambiguous mtimes (relative to `now`) are cleared
    /// before the tree is written out by `on_disk::write`.
    #[timed]
    fn pack_v2(
        &mut self,
        parents: DirstateParents,
        now: Timestamp,
    ) -> Result<Vec<u8>, DirstateError> {
        // TODO: how do we want to handle this in 2038?
        let now: i32 = now.0.try_into().expect("time overflow");
        let mut paths = Vec::new();
        for node in self.iter_nodes() {
            let node = node?;
            if let Some(entry) = node.entry()? {
                if entry.mtime_is_ambiguous(now) {
                    paths.push(
                        node.full_path_borrowed(self.on_disk)?
                            .detach_from_tree(),
                    )
                }
            }
        }
        // Borrow of `self` ends here since we collect cloned paths

        self.clear_known_ambiguous_mtimes(&paths)?;

        on_disk::write(self, parents)
    }
1065
1066
    /// Compute the working-directory status against this dirstate,
    /// delegating to `super::status::status`.
    ///
    /// `matcher` restricts which files are considered, `ignore_files` lists
    /// ignore-pattern files, and `options` tunes the walk; pattern-file
    /// warnings are returned alongside the status.
    fn status<'a>(
        &'a mut self,
        matcher: &'a (dyn Matcher + Sync),
        root_dir: PathBuf,
        ignore_files: Vec<PathBuf>,
        options: StatusOptions,
    ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
    {
        super::status::status(self, matcher, root_dir, ignore_files, options)
    }
1076
1077
    /// Number of files that have a copy source recorded, maintained
    /// incrementally by `copy_map_insert` / `copy_map_remove` / `drop_file`.
    fn copy_map_len(&self) -> usize {
        self.nodes_with_copy_source_count as usize
    }
1080
1081
    /// Iterate over `(path, copy_source)` pairs for every node that has a
    /// copy source recorded; other nodes are filtered out.
    fn copy_map_iter(&self) -> CopyMapIter<'_> {
        Box::new(filter_map_results(self.iter_nodes(), move |node| {
            Ok(if let Some(source) = node.copy_source(self.on_disk)? {
                Some((node.full_path(self.on_disk)?, source))
            } else {
                None
            })
        }))
    }
1090
1091
1091 fn copy_map_contains_key(
1092 fn copy_map_contains_key(
1092 &self,
1093 &self,
1093 key: &HgPath,
1094 key: &HgPath,
1094 ) -> Result<bool, DirstateV2ParseError> {
1095 ) -> Result<bool, DirstateV2ParseError> {
1095 Ok(if let Some(node) = self.get_node(key)? {
1096 Ok(if let Some(node) = self.get_node(key)? {
1096 node.has_copy_source()
1097 node.has_copy_source()
1097 } else {
1098 } else {
1098 false
1099 false
1099 })
1100 })
1100 }
1101 }
1101
1102
1102 fn copy_map_get(
1103 fn copy_map_get(
1103 &self,
1104 &self,
1104 key: &HgPath,
1105 key: &HgPath,
1105 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1106 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1106 if let Some(node) = self.get_node(key)? {
1107 if let Some(node) = self.get_node(key)? {
1107 if let Some(source) = node.copy_source(self.on_disk)? {
1108 if let Some(source) = node.copy_source(self.on_disk)? {
1108 return Ok(Some(source));
1109 return Ok(Some(source));
1109 }
1110 }
1110 }
1111 }
1111 Ok(None)
1112 Ok(None)
1112 }
1113 }
1113
1114
    /// Remove and return the copy source of `key`, if any, keeping
    /// `nodes_with_copy_source_count` in sync.
    fn copy_map_remove(
        &mut self,
        key: &HgPath,
    ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
        // Borrow the counter field separately so the closure below can
        // update it while `self.root` is mutably borrowed.
        let count = &mut self.nodes_with_copy_source_count;
        Ok(
            Self::get_node_mut(self.on_disk, &mut self.root, key)?.and_then(
                |node| {
                    if node.copy_source.is_some() {
                        *count -= 1
                    }
                    // `take` leaves `None` behind; `into_owned` detaches the
                    // returned path from any on-disk borrow.
                    node.copy_source.take().map(Cow::into_owned)
                },
            ),
        )
    }
1130
1131
    /// Record `value` as the copy source of `key`, creating intermediate
    /// tree nodes as needed. Returns the previous copy source, if any.
    fn copy_map_insert(
        &mut self,
        key: HgPathBuf,
        value: HgPathBuf,
    ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
        let node = Self::get_or_insert_node(
            self.on_disk,
            &mut self.root,
            &key,
            WithBasename::to_cow_owned,
            |_ancestor| {},
        )?;
        // Only bump the counter when this node gains a copy source for the
        // first time; `replace` below overwrites in place otherwise.
        if node.copy_source.is_none() {
            self.nodes_with_copy_source_count += 1
        }
        Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
    }
1148
1149
    /// Number of files that have a dirstate entry (directory-only nodes are
    /// not counted).
    fn len(&self) -> usize {
        self.nodes_with_entry_count as usize
    }
1152
1153
1153 fn contains_key(
1154 fn contains_key(
1154 &self,
1155 &self,
1155 key: &HgPath,
1156 key: &HgPath,
1156 ) -> Result<bool, DirstateV2ParseError> {
1157 ) -> Result<bool, DirstateV2ParseError> {
1157 Ok(self.get(key)?.is_some())
1158 Ok(self.get(key)?.is_some())
1158 }
1159 }
1159
1160
1160 fn get(
1161 fn get(
1161 &self,
1162 &self,
1162 key: &HgPath,
1163 key: &HgPath,
1163 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1164 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1164 Ok(if let Some(node) = self.get_node(key)? {
1165 Ok(if let Some(node) = self.get_node(key)? {
1165 node.entry()?
1166 node.entry()?
1166 } else {
1167 } else {
1167 None
1168 None
1168 })
1169 })
1169 }
1170 }
1170
1171
    /// Iterate over `(path, entry)` for every node that carries an entry,
    /// skipping directory-only nodes.
    fn iter(&self) -> StateMapIter<'_> {
        Box::new(filter_map_results(self.iter_nodes(), move |node| {
            Ok(if let Some(entry) = node.entry()? {
                Some((node.full_path(self.on_disk)?, entry))
            } else {
                None
            })
        }))
    }
1180
1181
    /// Iterate over directory nodes (nodes holding no `DirstateEntry`),
    /// yielding each directory path together with its cached mtime, if one
    /// is known.
    fn iter_directories(
        &self,
    ) -> Box<
        dyn Iterator<
                Item = Result<
                    (&HgPath, Option<Timestamp>),
                    DirstateV2ParseError,
                >,
            > + Send
            + '_,
    > {
        Box::new(filter_map_results(self.iter_nodes(), move |node| {
            // A node without a state/entry exists only to hold children,
            // i.e. it represents a directory.
            Ok(if node.state()?.is_none() {
                Some((
                    node.full_path(self.on_disk)?,
                    node.cached_directory_mtime()
                        .map(|mtime| Timestamp(mtime.seconds())),
                ))
            } else {
                None
            })
        }))
    }
1204 }
1205 }
General Comments 0
You need to be logged in to leave comments. Login now