dirstate: use a `merged` parameter to _addpath...
marmoute
r48316:c6b91a9c default
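In short: `_addpath()` gains a `merged=False` keyword argument which it simply forwards to `self._map.addfile(...)`, and `otherparent()` now expresses its merge-like case through that flag instead of hand-encoding the `b'm'` state. A minimal before/after sketch, paraphrasing the hunks below (new lines 451, 480 and 543):

    # before (otherparent(), merge-like branch)
    self._addpath(f, b'm', 0, from_p2=True)

    # after (otherparent(), merge-like branch)
    self._addpath(f, merged=True)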
@@ -1,1437 +1,1439 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 dirstatetuple = parsers.dirstatetuple
48 dirstatetuple = parsers.dirstatetuple
49
49
50
50
51 class repocache(filecache):
51 class repocache(filecache):
52 """filecache for files in .hg/"""
52 """filecache for files in .hg/"""
53
53
54 def join(self, obj, fname):
54 def join(self, obj, fname):
55 return obj._opener.join(fname)
55 return obj._opener.join(fname)
56
56
57
57
58 class rootcache(filecache):
58 class rootcache(filecache):
59 """filecache for files in the repository root"""
59 """filecache for files in the repository root"""
60
60
61 def join(self, obj, fname):
61 def join(self, obj, fname):
62 return obj._join(fname)
62 return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
75 @interfaceutil.implementer(intdirstate.idirstate)
75 @interfaceutil.implementer(intdirstate.idirstate)
76 class dirstate(object):
76 class dirstate(object):
77 def __init__(
77 def __init__(
78 self,
78 self,
79 opener,
79 opener,
80 ui,
80 ui,
81 root,
81 root,
82 validate,
82 validate,
83 sparsematchfn,
83 sparsematchfn,
84 nodeconstants,
84 nodeconstants,
85 use_dirstate_v2,
85 use_dirstate_v2,
86 ):
86 ):
87 """Create a new dirstate object.
87 """Create a new dirstate object.
88
88
89 opener is an open()-like callable that can be used to open the
89 opener is an open()-like callable that can be used to open the
90 dirstate file; root is the root of the directory tracked by
90 dirstate file; root is the root of the directory tracked by
91 the dirstate.
91 the dirstate.
92 """
92 """
93 self._use_dirstate_v2 = use_dirstate_v2
93 self._use_dirstate_v2 = use_dirstate_v2
94 self._nodeconstants = nodeconstants
94 self._nodeconstants = nodeconstants
95 self._opener = opener
95 self._opener = opener
96 self._validate = validate
96 self._validate = validate
97 self._root = root
97 self._root = root
98 self._sparsematchfn = sparsematchfn
98 self._sparsematchfn = sparsematchfn
99 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
99 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
100 # UNC path pointing to root share (issue4557)
100 # UNC path pointing to root share (issue4557)
101 self._rootdir = pathutil.normasprefix(root)
101 self._rootdir = pathutil.normasprefix(root)
102 self._dirty = False
102 self._dirty = False
103 self._lastnormaltime = 0
103 self._lastnormaltime = 0
104 self._ui = ui
104 self._ui = ui
105 self._filecache = {}
105 self._filecache = {}
106 self._parentwriters = 0
106 self._parentwriters = 0
107 self._filename = b'dirstate'
107 self._filename = b'dirstate'
108 self._pendingfilename = b'%s.pending' % self._filename
108 self._pendingfilename = b'%s.pending' % self._filename
109 self._plchangecallbacks = {}
109 self._plchangecallbacks = {}
110 self._origpl = None
110 self._origpl = None
111 self._updatedfiles = set()
111 self._updatedfiles = set()
112 self._mapcls = dirstatemap.dirstatemap
112 self._mapcls = dirstatemap.dirstatemap
113 # Access and cache cwd early, so we don't access it for the first time
113 # Access and cache cwd early, so we don't access it for the first time
114 # after a working-copy update caused it to not exist (accessing it then
114 # after a working-copy update caused it to not exist (accessing it then
115 # raises an exception).
115 # raises an exception).
116 self._cwd
116 self._cwd
117
117
118 def prefetch_parents(self):
118 def prefetch_parents(self):
119 """make sure the parents are loaded
119 """make sure the parents are loaded
120
120
121 Used to avoid a race condition.
121 Used to avoid a race condition.
122 """
122 """
123 self._pl
123 self._pl
124
124
125 @contextlib.contextmanager
125 @contextlib.contextmanager
126 def parentchange(self):
126 def parentchange(self):
127 """Context manager for handling dirstate parents.
127 """Context manager for handling dirstate parents.
128
128
129 If an exception occurs in the scope of the context manager,
129 If an exception occurs in the scope of the context manager,
130 the incoherent dirstate won't be written when wlock is
130 the incoherent dirstate won't be written when wlock is
131 released.
131 released.
132 """
132 """
133 self._parentwriters += 1
133 self._parentwriters += 1
134 yield
134 yield
135 # Typically we want the "undo" step of a context manager in a
135 # Typically we want the "undo" step of a context manager in a
136 # finally block so it happens even when an exception
136 # finally block so it happens even when an exception
137 # occurs. In this case, however, we only want to decrement
137 # occurs. In this case, however, we only want to decrement
138 # parentwriters if the code in the with statement exits
138 # parentwriters if the code in the with statement exits
139 # normally, so we don't have a try/finally here on purpose.
139 # normally, so we don't have a try/finally here on purpose.
140 self._parentwriters -= 1
140 self._parentwriters -= 1
141
141
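Because `setparents()` below refuses to run with `_parentwriters == 0`, parent updates are expected to happen inside this context manager. A minimal usage sketch (assuming `repo` is a loaded repository, the wlock is held, and `newp1`/`newp2` are placeholder node ids):

    with repo.dirstate.parentchange():
        # if this raises, _parentwriters stays elevated on purpose, so the
        # half-updated dirstate is not written back when the wlock is released
        repo.dirstate.setparents(newp1, newp2)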
142 def pendingparentchange(self):
142 def pendingparentchange(self):
143 """Returns true if the dirstate is in the middle of a set of changes
143 """Returns true if the dirstate is in the middle of a set of changes
144 that modify the dirstate parent.
144 that modify the dirstate parent.
145 """
145 """
146 return self._parentwriters > 0
146 return self._parentwriters > 0
147
147
148 @propertycache
148 @propertycache
149 def _map(self):
149 def _map(self):
150 """Return the dirstate contents (see documentation for dirstatemap)."""
150 """Return the dirstate contents (see documentation for dirstatemap)."""
151 self._map = self._mapcls(
151 self._map = self._mapcls(
152 self._ui,
152 self._ui,
153 self._opener,
153 self._opener,
154 self._root,
154 self._root,
155 self._nodeconstants,
155 self._nodeconstants,
156 self._use_dirstate_v2,
156 self._use_dirstate_v2,
157 )
157 )
158 return self._map
158 return self._map
159
159
160 @property
160 @property
161 def _sparsematcher(self):
161 def _sparsematcher(self):
162 """The matcher for the sparse checkout.
162 """The matcher for the sparse checkout.
163
163
164 The working directory may not include every file from a manifest. The
164 The working directory may not include every file from a manifest. The
165 matcher obtained by this property will match a path if it is to be
165 matcher obtained by this property will match a path if it is to be
166 included in the working directory.
166 included in the working directory.
167 """
167 """
168 # TODO there is potential to cache this property. For now, the matcher
168 # TODO there is potential to cache this property. For now, the matcher
169 # is resolved on every access. (But the called function does use a
169 # is resolved on every access. (But the called function does use a
170 # cache to keep the lookup fast.)
170 # cache to keep the lookup fast.)
171 return self._sparsematchfn()
171 return self._sparsematchfn()
172
172
173 @repocache(b'branch')
173 @repocache(b'branch')
174 def _branch(self):
174 def _branch(self):
175 try:
175 try:
176 return self._opener.read(b"branch").strip() or b"default"
176 return self._opener.read(b"branch").strip() or b"default"
177 except IOError as inst:
177 except IOError as inst:
178 if inst.errno != errno.ENOENT:
178 if inst.errno != errno.ENOENT:
179 raise
179 raise
180 return b"default"
180 return b"default"
181
181
182 @property
182 @property
183 def _pl(self):
183 def _pl(self):
184 return self._map.parents()
184 return self._map.parents()
185
185
186 def hasdir(self, d):
186 def hasdir(self, d):
187 return self._map.hastrackeddir(d)
187 return self._map.hastrackeddir(d)
188
188
189 @rootcache(b'.hgignore')
189 @rootcache(b'.hgignore')
190 def _ignore(self):
190 def _ignore(self):
191 files = self._ignorefiles()
191 files = self._ignorefiles()
192 if not files:
192 if not files:
193 return matchmod.never()
193 return matchmod.never()
194
194
195 pats = [b'include:%s' % f for f in files]
195 pats = [b'include:%s' % f for f in files]
196 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
196 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
197
197
198 @propertycache
198 @propertycache
199 def _slash(self):
199 def _slash(self):
200 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
200 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
201
201
202 @propertycache
202 @propertycache
203 def _checklink(self):
203 def _checklink(self):
204 return util.checklink(self._root)
204 return util.checklink(self._root)
205
205
206 @propertycache
206 @propertycache
207 def _checkexec(self):
207 def _checkexec(self):
208 return bool(util.checkexec(self._root))
208 return bool(util.checkexec(self._root))
209
209
210 @propertycache
210 @propertycache
211 def _checkcase(self):
211 def _checkcase(self):
212 return not util.fscasesensitive(self._join(b'.hg'))
212 return not util.fscasesensitive(self._join(b'.hg'))
213
213
214 def _join(self, f):
214 def _join(self, f):
215 # much faster than os.path.join()
215 # much faster than os.path.join()
216 # it's safe because f is always a relative path
216 # it's safe because f is always a relative path
217 return self._rootdir + f
217 return self._rootdir + f
218
218
219 def flagfunc(self, buildfallback):
219 def flagfunc(self, buildfallback):
220 if self._checklink and self._checkexec:
220 if self._checklink and self._checkexec:
221
221
222 def f(x):
222 def f(x):
223 try:
223 try:
224 st = os.lstat(self._join(x))
224 st = os.lstat(self._join(x))
225 if util.statislink(st):
225 if util.statislink(st):
226 return b'l'
226 return b'l'
227 if util.statisexec(st):
227 if util.statisexec(st):
228 return b'x'
228 return b'x'
229 except OSError:
229 except OSError:
230 pass
230 pass
231 return b''
231 return b''
232
232
233 return f
233 return f
234
234
235 fallback = buildfallback()
235 fallback = buildfallback()
236 if self._checklink:
236 if self._checklink:
237
237
238 def f(x):
238 def f(x):
239 if os.path.islink(self._join(x)):
239 if os.path.islink(self._join(x)):
240 return b'l'
240 return b'l'
241 if b'x' in fallback(x):
241 if b'x' in fallback(x):
242 return b'x'
242 return b'x'
243 return b''
243 return b''
244
244
245 return f
245 return f
246 if self._checkexec:
246 if self._checkexec:
247
247
248 def f(x):
248 def f(x):
249 if b'l' in fallback(x):
249 if b'l' in fallback(x):
250 return b'l'
250 return b'l'
251 if util.isexec(self._join(x)):
251 if util.isexec(self._join(x)):
252 return b'x'
252 return b'x'
253 return b''
253 return b''
254
254
255 return f
255 return f
256 else:
256 else:
257 return fallback
257 return fallback
258
258
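The callable returned here maps a path to its flags: b'l' for a symlink, b'x' for an executable file, b'' otherwise, consulting `buildfallback()` only for the bits the filesystem cannot answer. A hedged sketch of the shape expected from the fallback (the real callers build it from manifest data, outside this file):

    def buildfallback():
        def fallback(path):
            # hypothetical stand-in: b'' means "not a link, not executable"
            return b''
        return fallback

    flags = repo.dirstate.flagfunc(buildfallback)
    assert flags(b'some/file') in (b'', b'l', b'x')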
259 @propertycache
259 @propertycache
260 def _cwd(self):
260 def _cwd(self):
261 # internal config: ui.forcecwd
261 # internal config: ui.forcecwd
262 forcecwd = self._ui.config(b'ui', b'forcecwd')
262 forcecwd = self._ui.config(b'ui', b'forcecwd')
263 if forcecwd:
263 if forcecwd:
264 return forcecwd
264 return forcecwd
265 return encoding.getcwd()
265 return encoding.getcwd()
266
266
267 def getcwd(self):
267 def getcwd(self):
268 """Return the path from which a canonical path is calculated.
268 """Return the path from which a canonical path is calculated.
269
269
270 This path should be used to resolve file patterns or to convert
270 This path should be used to resolve file patterns or to convert
271 canonical paths back to file paths for display. It shouldn't be
271 canonical paths back to file paths for display. It shouldn't be
272 used to get real file paths. Use vfs functions instead.
272 used to get real file paths. Use vfs functions instead.
273 """
273 """
274 cwd = self._cwd
274 cwd = self._cwd
275 if cwd == self._root:
275 if cwd == self._root:
276 return b''
276 return b''
277 # self._root ends with a path separator if self._root is '/' or 'C:\'
277 # self._root ends with a path separator if self._root is '/' or 'C:\'
278 rootsep = self._root
278 rootsep = self._root
279 if not util.endswithsep(rootsep):
279 if not util.endswithsep(rootsep):
280 rootsep += pycompat.ossep
280 rootsep += pycompat.ossep
281 if cwd.startswith(rootsep):
281 if cwd.startswith(rootsep):
282 return cwd[len(rootsep) :]
282 return cwd[len(rootsep) :]
283 else:
283 else:
284 # we're outside the repo. return an absolute path.
284 # we're outside the repo. return an absolute path.
285 return cwd
285 return cwd
286
286
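A worked example of the branches above, with a hypothetical root of b'/repo':

    # cwd == b'/repo'        -> b''        (we are at the root)
    # cwd == b'/repo/src/ui' -> b'src/ui'  (rootsep b'/repo/' is stripped)
    # cwd == b'/tmp'         -> b'/tmp'    (outside the repo: absolute path returned)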
287 def pathto(self, f, cwd=None):
287 def pathto(self, f, cwd=None):
288 if cwd is None:
288 if cwd is None:
289 cwd = self.getcwd()
289 cwd = self.getcwd()
290 path = util.pathto(self._root, cwd, f)
290 path = util.pathto(self._root, cwd, f)
291 if self._slash:
291 if self._slash:
292 return util.pconvert(path)
292 return util.pconvert(path)
293 return path
293 return path
294
294
295 def __getitem__(self, key):
295 def __getitem__(self, key):
296 """Return the current state of key (a filename) in the dirstate.
296 """Return the current state of key (a filename) in the dirstate.
297
297
298 States are:
298 States are:
299 n normal
299 n normal
300 m needs merging
300 m needs merging
301 r marked for removal
301 r marked for removal
302 a marked for addition
302 a marked for addition
303 ? not tracked
303 ? not tracked
304
304
305 XXX The "state" is a bit obscure to be in the "public" API. We should
305 XXX The "state" is a bit obscure to be in the "public" API. We should
306 consider migrating all users of this to go through the dirstate entry
306 consider migrating all users of this to go through the dirstate entry
307 instead.
307 instead.
308 """
308 """
309 entry = self._map.get(key)
309 entry = self._map.get(key)
310 if entry is not None:
310 if entry is not None:
311 return entry.state
311 return entry.state
312 return b'?'
312 return b'?'
313
313
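So a state lookup is just an indexing operation (sketch, with a placeholder filename):

    state = repo.dirstate[b'foo.txt']
    assert state in (b'n', b'm', b'r', b'a', b'?')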
314 def __contains__(self, key):
314 def __contains__(self, key):
315 return key in self._map
315 return key in self._map
316
316
317 def __iter__(self):
317 def __iter__(self):
318 return iter(sorted(self._map))
318 return iter(sorted(self._map))
319
319
320 def items(self):
320 def items(self):
321 return pycompat.iteritems(self._map)
321 return pycompat.iteritems(self._map)
322
322
323 iteritems = items
323 iteritems = items
324
324
325 def directories(self):
325 def directories(self):
326 return self._map.directories()
326 return self._map.directories()
327
327
328 def parents(self):
328 def parents(self):
329 return [self._validate(p) for p in self._pl]
329 return [self._validate(p) for p in self._pl]
330
330
331 def p1(self):
331 def p1(self):
332 return self._validate(self._pl[0])
332 return self._validate(self._pl[0])
333
333
334 def p2(self):
334 def p2(self):
335 return self._validate(self._pl[1])
335 return self._validate(self._pl[1])
336
336
337 @property
337 @property
338 def in_merge(self):
338 def in_merge(self):
339 """True if a merge is in progress"""
339 """True if a merge is in progress"""
340 return self._pl[1] != self._nodeconstants.nullid
340 return self._pl[1] != self._nodeconstants.nullid
341
341
342 def branch(self):
342 def branch(self):
343 return encoding.tolocal(self._branch)
343 return encoding.tolocal(self._branch)
344
344
345 def setparents(self, p1, p2=None):
345 def setparents(self, p1, p2=None):
346 """Set dirstate parents to p1 and p2.
346 """Set dirstate parents to p1 and p2.
347
347
348 When moving from two parents to one, "merged" entries are
348 When moving from two parents to one, "merged" entries are
349 adjusted to normal and previous copy records are discarded and
349 adjusted to normal and previous copy records are discarded and
350 returned by the call.
350 returned by the call.
351
351
352 See localrepo.setparents()
352 See localrepo.setparents()
353 """
353 """
354 if p2 is None:
354 if p2 is None:
355 p2 = self._nodeconstants.nullid
355 p2 = self._nodeconstants.nullid
356 if self._parentwriters == 0:
356 if self._parentwriters == 0:
357 raise ValueError(
357 raise ValueError(
358 b"cannot set dirstate parent outside of "
358 b"cannot set dirstate parent outside of "
359 b"dirstate.parentchange context manager"
359 b"dirstate.parentchange context manager"
360 )
360 )
361
361
362 self._dirty = True
362 self._dirty = True
363 oldp2 = self._pl[1]
363 oldp2 = self._pl[1]
364 if self._origpl is None:
364 if self._origpl is None:
365 self._origpl = self._pl
365 self._origpl = self._pl
366 self._map.setparents(p1, p2)
366 self._map.setparents(p1, p2)
367 copies = {}
367 copies = {}
368 if (
368 if (
369 oldp2 != self._nodeconstants.nullid
369 oldp2 != self._nodeconstants.nullid
370 and p2 == self._nodeconstants.nullid
370 and p2 == self._nodeconstants.nullid
371 ):
371 ):
372 candidatefiles = self._map.non_normal_or_other_parent_paths()
372 candidatefiles = self._map.non_normal_or_other_parent_paths()
373
373
374 for f in candidatefiles:
374 for f in candidatefiles:
375 s = self._map.get(f)
375 s = self._map.get(f)
376 if s is None:
376 if s is None:
377 continue
377 continue
378
378
379 # Discard "merged" markers when moving away from a merge state
379 # Discard "merged" markers when moving away from a merge state
380 if s.merged:
380 if s.merged:
381 source = self._map.copymap.get(f)
381 source = self._map.copymap.get(f)
382 if source:
382 if source:
383 copies[f] = source
383 copies[f] = source
384 self.normallookup(f)
384 self.normallookup(f)
385 # Also fix up otherparent markers
385 # Also fix up otherparent markers
386 elif s.from_p2:
386 elif s.from_p2:
387 source = self._map.copymap.get(f)
387 source = self._map.copymap.get(f)
388 if source:
388 if source:
389 copies[f] = source
389 copies[f] = source
390 self.add(f)
390 self.add(f)
391 return copies
391 return copies
392
392
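The loop above only rewrites entries that were merged or taken from the second parent: they are downgraded to normal-lookup/add and their copy sources are handed back so the caller can re-record them against the surviving parent. Roughly, on the calling side (a simplified sketch of what localrepo.setparents does):

    with repo.dirstate.parentchange():
        copies = repo.dirstate.setparents(p1)  # p2 defaults to the null id
        for dest, source in copies.items():
            # re-register the copy now that only one parent remains
            repo.dirstate.copy(source, dest)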
393 def setbranch(self, branch):
393 def setbranch(self, branch):
394 self.__class__._branch.set(self, encoding.fromlocal(branch))
394 self.__class__._branch.set(self, encoding.fromlocal(branch))
395 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
395 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
396 try:
396 try:
397 f.write(self._branch + b'\n')
397 f.write(self._branch + b'\n')
398 f.close()
398 f.close()
399
399
400 # make sure filecache has the correct stat info for _branch after
400 # make sure filecache has the correct stat info for _branch after
401 # replacing the underlying file
401 # replacing the underlying file
402 ce = self._filecache[b'_branch']
402 ce = self._filecache[b'_branch']
403 if ce:
403 if ce:
404 ce.refresh()
404 ce.refresh()
405 except: # re-raises
405 except: # re-raises
406 f.discard()
406 f.discard()
407 raise
407 raise
408
408
409 def invalidate(self):
409 def invalidate(self):
410 """Causes the next access to reread the dirstate.
410 """Causes the next access to reread the dirstate.
411
411
412 This is different from localrepo.invalidatedirstate() because it always
412 This is different from localrepo.invalidatedirstate() because it always
413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
414 check whether the dirstate has changed before rereading it."""
414 check whether the dirstate has changed before rereading it."""
415
415
416 for a in ("_map", "_branch", "_ignore"):
416 for a in ("_map", "_branch", "_ignore"):
417 if a in self.__dict__:
417 if a in self.__dict__:
418 delattr(self, a)
418 delattr(self, a)
419 self._lastnormaltime = 0
419 self._lastnormaltime = 0
420 self._dirty = False
420 self._dirty = False
421 self._updatedfiles.clear()
421 self._updatedfiles.clear()
422 self._parentwriters = 0
422 self._parentwriters = 0
423 self._origpl = None
423 self._origpl = None
424
424
425 def copy(self, source, dest):
425 def copy(self, source, dest):
426 """Mark dest as a copy of source. Unmark dest if source is None."""
426 """Mark dest as a copy of source. Unmark dest if source is None."""
427 if source == dest:
427 if source == dest:
428 return
428 return
429 self._dirty = True
429 self._dirty = True
430 if source is not None:
430 if source is not None:
431 self._map.copymap[dest] = source
431 self._map.copymap[dest] = source
432 self._updatedfiles.add(source)
432 self._updatedfiles.add(source)
433 self._updatedfiles.add(dest)
433 self._updatedfiles.add(dest)
434 elif self._map.copymap.pop(dest, None):
434 elif self._map.copymap.pop(dest, None):
435 self._updatedfiles.add(dest)
435 self._updatedfiles.add(dest)
436
436
437 def copied(self, file):
437 def copied(self, file):
438 return self._map.copymap.get(file, None)
438 return self._map.copymap.get(file, None)
439
439
440 def copies(self):
440 def copies(self):
441 return self._map.copymap
441 return self._map.copymap
442
442
443 def _addpath(
443 def _addpath(
444 self,
444 self,
445 f,
445 f,
446 state=None,
446 state=None,
447 mode=0,
447 mode=0,
448 size=None,
448 size=None,
449 mtime=None,
449 mtime=None,
450 added=False,
450 added=False,
451 + merged=False,
451 from_p2=False,
452 from_p2=False,
452 possibly_dirty=False,
453 possibly_dirty=False,
453 ):
454 ):
454 entry = self._map.get(f)
455 entry = self._map.get(f)
455 if added or entry is not None and entry.removed:
456 if added or entry is not None and entry.removed:
456 scmutil.checkfilename(f)
457 scmutil.checkfilename(f)
457 if self._map.hastrackeddir(f):
458 if self._map.hastrackeddir(f):
458 msg = _(b'directory %r already in dirstate')
459 msg = _(b'directory %r already in dirstate')
459 msg %= pycompat.bytestr(f)
460 msg %= pycompat.bytestr(f)
460 raise error.Abort(msg)
461 raise error.Abort(msg)
461 # shadows
462 # shadows
462 for d in pathutil.finddirs(f):
463 for d in pathutil.finddirs(f):
463 if self._map.hastrackeddir(d):
464 if self._map.hastrackeddir(d):
464 break
465 break
465 entry = self._map.get(d)
466 entry = self._map.get(d)
466 if entry is not None and not entry.removed:
467 if entry is not None and not entry.removed:
467 msg = _(b'file %r in dirstate clashes with %r')
468 msg = _(b'file %r in dirstate clashes with %r')
468 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
469 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
469 raise error.Abort(msg)
470 raise error.Abort(msg)
470 self._dirty = True
471 self._dirty = True
471 self._updatedfiles.add(f)
472 self._updatedfiles.add(f)
472 self._map.addfile(
473 self._map.addfile(
473 f,
474 f,
474 state=state,
475 state=state,
475 mode=mode,
476 mode=mode,
476 size=size,
477 size=size,
477 mtime=mtime,
478 mtime=mtime,
478 added=added,
479 added=added,
480 + merged=merged,
479 from_p2=from_p2,
481 from_p2=from_p2,
480 possibly_dirty=possibly_dirty,
482 possibly_dirty=possibly_dirty,
481 )
483 )
482
484
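With this change the merge case states its intent (`merged=True`) instead of hand-encoding the b'm' state byte, and `_addpath` forwards the flag to the map layer untouched. For reference, the call shapes used elsewhere in this file:

    self._addpath(f, b'n', mode, size, mtime)       # normal(): known-clean file
    self._addpath(f, b'n', 0, possibly_dirty=True)  # normallookup(): possibly dirty
    self._addpath(f, merged=True)                   # otherparent(), merge-like (new here)
    self._addpath(f, b'n', 0, from_p2=True)         # otherparent(), add-like
    self._addpath(f, added=True)                    # add(): newly added file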
483 def normal(self, f, parentfiledata=None):
485 def normal(self, f, parentfiledata=None):
484 """Mark a file normal and clean.
486 """Mark a file normal and clean.
485
487
486 parentfiledata: (mode, size, mtime) of the clean file
488 parentfiledata: (mode, size, mtime) of the clean file
487
489
488 parentfiledata should be computed from memory (for mode,
490 parentfiledata should be computed from memory (for mode,
489 size), at or as close as possible to the point where we
491 size), at or as close as possible to the point where we
490 determined the file was clean, to limit the risk of the
492 determined the file was clean, to limit the risk of the
491 file having been changed by an external process between the
493 file having been changed by an external process between the
492 moment where the file was determined to be clean and now."""
494 moment where the file was determined to be clean and now."""
493 if parentfiledata:
495 if parentfiledata:
494 (mode, size, mtime) = parentfiledata
496 (mode, size, mtime) = parentfiledata
495 else:
497 else:
496 s = os.lstat(self._join(f))
498 s = os.lstat(self._join(f))
497 mode = s.st_mode
499 mode = s.st_mode
498 size = s.st_size
500 size = s.st_size
499 mtime = s[stat.ST_MTIME]
501 mtime = s[stat.ST_MTIME]
500 self._addpath(f, b'n', mode, size, mtime)
502 self._addpath(f, b'n', mode, size, mtime)
501 self._map.copymap.pop(f, None)
503 self._map.copymap.pop(f, None)
502 if f in self._map.nonnormalset:
504 if f in self._map.nonnormalset:
503 self._map.nonnormalset.remove(f)
505 self._map.nonnormalset.remove(f)
504 if mtime > self._lastnormaltime:
506 if mtime > self._lastnormaltime:
505 # Remember the most recent modification timeslot for status(),
507 # Remember the most recent modification timeslot for status(),
506 # to make sure we won't miss future size-preserving file content
508 # to make sure we won't miss future size-preserving file content
507 # modifications that happen within the same timeslot.
509 # modifications that happen within the same timeslot.
508 self._lastnormaltime = mtime
510 self._lastnormaltime = mtime
509
511
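Callers that already hold a fresh stat result can pass it as `parentfiledata` and skip the extra lstat; the tuple mirrors exactly what the fallback branch computes. A sketch (the stat must have been taken while the file was known to be clean):

    st = os.lstat(repo.wjoin(f))
    repo.dirstate.normal(f, parentfiledata=(st.st_mode, st.st_size, st[stat.ST_MTIME]))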
510 def normallookup(self, f):
512 def normallookup(self, f):
511 '''Mark a file normal, but possibly dirty.'''
513 '''Mark a file normal, but possibly dirty.'''
512 if self.in_merge:
514 if self.in_merge:
513 # if there is a merge going on and the file was either
515 # if there is a merge going on and the file was either
514 # "merged" or coming from other parent (-2) before
516 # "merged" or coming from other parent (-2) before
515 # being removed, restore that state.
517 # being removed, restore that state.
516 entry = self._map.get(f)
518 entry = self._map.get(f)
517 if entry is not None:
519 if entry is not None:
518 # XXX this should probably be dealt with at a lower level
520 # XXX this should probably be dealt with at a lower level
519 # (see `merged_removed` and `from_p2_removed`)
521 # (see `merged_removed` and `from_p2_removed`)
520 if entry.merged_removed or entry.from_p2_removed:
522 if entry.merged_removed or entry.from_p2_removed:
521 source = self._map.copymap.get(f)
523 source = self._map.copymap.get(f)
522 if entry.merged_removed:
524 if entry.merged_removed:
523 self.merge(f)
525 self.merge(f)
524 elif entry.from_p2_removed:
526 elif entry.from_p2_removed:
525 self.otherparent(f)
527 self.otherparent(f)
526 if source is not None:
528 if source is not None:
527 self.copy(source, f)
529 self.copy(source, f)
528 return
530 return
529 elif entry.merged or entry.from_p2:
531 elif entry.merged or entry.from_p2:
530 return
532 return
531 self._addpath(f, b'n', 0, possibly_dirty=True)
533 self._addpath(f, b'n', 0, possibly_dirty=True)
532 self._map.copymap.pop(f, None)
534 self._map.copymap.pop(f, None)
533
535
534 def otherparent(self, f):
536 def otherparent(self, f):
535 '''Mark as coming from the other parent, always dirty.'''
537 '''Mark as coming from the other parent, always dirty.'''
536 if not self.in_merge:
538 if not self.in_merge:
537 msg = _(b"setting %r to other parent only allowed in merges") % f
539 msg = _(b"setting %r to other parent only allowed in merges") % f
538 raise error.Abort(msg)
540 raise error.Abort(msg)
539 if f in self and self[f] == b'n':
541 if f in self and self[f] == b'n':
540 # merge-like
542 # merge-like
541 - self._addpath(f, b'm', 0, from_p2=True)
543 + self._addpath(f, merged=True)
542 else:
544 else:
543 # add-like
545 # add-like
544 self._addpath(f, b'n', 0, from_p2=True)
546 self._addpath(f, b'n', 0, from_p2=True)
545 self._map.copymap.pop(f, None)
547 self._map.copymap.pop(f, None)
546
548
547 def add(self, f):
549 def add(self, f):
548 '''Mark a file added.'''
550 '''Mark a file added.'''
549 self._addpath(f, added=True)
551 self._addpath(f, added=True)
550 self._map.copymap.pop(f, None)
552 self._map.copymap.pop(f, None)
551
553
552 def remove(self, f):
554 def remove(self, f):
553 '''Mark a file removed.'''
555 '''Mark a file removed.'''
554 self._dirty = True
556 self._dirty = True
555 self._updatedfiles.add(f)
557 self._updatedfiles.add(f)
556 self._map.removefile(f, in_merge=self.in_merge)
558 self._map.removefile(f, in_merge=self.in_merge)
557
559
558 def merge(self, f):
560 def merge(self, f):
559 '''Mark a file merged.'''
561 '''Mark a file merged.'''
560 if not self.in_merge:
562 if not self.in_merge:
561 return self.normallookup(f)
563 return self.normallookup(f)
562 return self.otherparent(f)
564 return self.otherparent(f)
563
565
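In other words, "mark merged" only means something while a merge is in progress; outside of one it degrades to a plain normallookup. The dispatch, summarized:

    # not in_merge                   -> merge(f) falls back to normallookup(f)
    # in_merge, f tracked as b'n'    -> otherparent(f) takes the merge-like path (merged=True)
    # in_merge, anything else        -> otherparent(f) takes the add-like path (from_p2=True)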
564 def drop(self, f):
566 def drop(self, f):
565 '''Drop a file from the dirstate'''
567 '''Drop a file from the dirstate'''
566 oldstate = self[f]
568 oldstate = self[f]
567 if self._map.dropfile(f, oldstate):
569 if self._map.dropfile(f, oldstate):
568 self._dirty = True
570 self._dirty = True
569 self._updatedfiles.add(f)
571 self._updatedfiles.add(f)
570 self._map.copymap.pop(f, None)
572 self._map.copymap.pop(f, None)
571
573
572 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
574 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
573 if exists is None:
575 if exists is None:
574 exists = os.path.lexists(os.path.join(self._root, path))
576 exists = os.path.lexists(os.path.join(self._root, path))
575 if not exists:
577 if not exists:
576 # Maybe a path component exists
578 # Maybe a path component exists
577 if not ignoremissing and b'/' in path:
579 if not ignoremissing and b'/' in path:
578 d, f = path.rsplit(b'/', 1)
580 d, f = path.rsplit(b'/', 1)
579 d = self._normalize(d, False, ignoremissing, None)
581 d = self._normalize(d, False, ignoremissing, None)
580 folded = d + b"/" + f
582 folded = d + b"/" + f
581 else:
583 else:
582 # No path components, preserve original case
584 # No path components, preserve original case
583 folded = path
585 folded = path
584 else:
586 else:
585 # recursively normalize leading directory components
587 # recursively normalize leading directory components
586 # against dirstate
588 # against dirstate
587 if b'/' in normed:
589 if b'/' in normed:
588 d, f = normed.rsplit(b'/', 1)
590 d, f = normed.rsplit(b'/', 1)
589 d = self._normalize(d, False, ignoremissing, True)
591 d = self._normalize(d, False, ignoremissing, True)
590 r = self._root + b"/" + d
592 r = self._root + b"/" + d
591 folded = d + b"/" + util.fspath(f, r)
593 folded = d + b"/" + util.fspath(f, r)
592 else:
594 else:
593 folded = util.fspath(normed, self._root)
595 folded = util.fspath(normed, self._root)
594 storemap[normed] = folded
596 storemap[normed] = folded
595
597
596 return folded
598 return folded
597
599
598 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
600 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
599 normed = util.normcase(path)
601 normed = util.normcase(path)
600 folded = self._map.filefoldmap.get(normed, None)
602 folded = self._map.filefoldmap.get(normed, None)
601 if folded is None:
603 if folded is None:
602 if isknown:
604 if isknown:
603 folded = path
605 folded = path
604 else:
606 else:
605 folded = self._discoverpath(
607 folded = self._discoverpath(
606 path, normed, ignoremissing, exists, self._map.filefoldmap
608 path, normed, ignoremissing, exists, self._map.filefoldmap
607 )
609 )
608 return folded
610 return folded
609
611
610 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
612 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
611 normed = util.normcase(path)
613 normed = util.normcase(path)
612 folded = self._map.filefoldmap.get(normed, None)
614 folded = self._map.filefoldmap.get(normed, None)
613 if folded is None:
615 if folded is None:
614 folded = self._map.dirfoldmap.get(normed, None)
616 folded = self._map.dirfoldmap.get(normed, None)
615 if folded is None:
617 if folded is None:
616 if isknown:
618 if isknown:
617 folded = path
619 folded = path
618 else:
620 else:
619 # store discovered result in dirfoldmap so that future
621 # store discovered result in dirfoldmap so that future
620 # normalizefile calls don't start matching directories
622 # normalizefile calls don't start matching directories
621 folded = self._discoverpath(
623 folded = self._discoverpath(
622 path, normed, ignoremissing, exists, self._map.dirfoldmap
624 path, normed, ignoremissing, exists, self._map.dirfoldmap
623 )
625 )
624 return folded
626 return folded
625
627
626 def normalize(self, path, isknown=False, ignoremissing=False):
628 def normalize(self, path, isknown=False, ignoremissing=False):
627 """
629 """
628 normalize the case of a pathname when on a casefolding filesystem
630 normalize the case of a pathname when on a casefolding filesystem
629
631
630 isknown specifies whether the filename came from walking the
632 isknown specifies whether the filename came from walking the
631 disk, to avoid extra filesystem access.
633 disk, to avoid extra filesystem access.
632
634
633 If ignoremissing is True, missing paths are returned
635 If ignoremissing is True, missing paths are returned
634 unchanged. Otherwise, we try harder to normalize possibly
636 unchanged. Otherwise, we try harder to normalize possibly
635 existing path components.
637 existing path components.
636
638
637 The normalized case is determined based on the following precedence:
639 The normalized case is determined based on the following precedence:
638
640
639 - version of name already stored in the dirstate
641 - version of name already stored in the dirstate
640 - version of name stored on disk
642 - version of name stored on disk
641 - version provided via command arguments
643 - version provided via command arguments
642 """
644 """
643
645
644 if self._checkcase:
646 if self._checkcase:
645 return self._normalize(path, isknown, ignoremissing)
647 return self._normalize(path, isknown, ignoremissing)
646 return path
648 return path
647
649
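On a case-insensitive filesystem this is what lets commands accept whatever case the user typed. A hedged example with hypothetical names, assuming b'README.txt' is already tracked:

    repo.dirstate.normalize(b'readme.TXT')    # -> b'README.txt' (dirstate spelling wins)
    repo.dirstate.normalize(b'New-File.txt')  # -> on-disk spelling if the file exists,
                                              #    otherwise the argument as given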
648 def clear(self):
650 def clear(self):
649 self._map.clear()
651 self._map.clear()
650 self._lastnormaltime = 0
652 self._lastnormaltime = 0
651 self._updatedfiles.clear()
653 self._updatedfiles.clear()
652 self._dirty = True
654 self._dirty = True
653
655
654 def rebuild(self, parent, allfiles, changedfiles=None):
656 def rebuild(self, parent, allfiles, changedfiles=None):
655 if changedfiles is None:
657 if changedfiles is None:
656 # Rebuild entire dirstate
658 # Rebuild entire dirstate
657 to_lookup = allfiles
659 to_lookup = allfiles
658 to_drop = []
660 to_drop = []
659 lastnormaltime = self._lastnormaltime
661 lastnormaltime = self._lastnormaltime
660 self.clear()
662 self.clear()
661 self._lastnormaltime = lastnormaltime
663 self._lastnormaltime = lastnormaltime
662 elif len(changedfiles) < 10:
664 elif len(changedfiles) < 10:
663 # Avoid turning allfiles into a set, which can be expensive if it's
665 # Avoid turning allfiles into a set, which can be expensive if it's
664 # large.
666 # large.
665 to_lookup = []
667 to_lookup = []
666 to_drop = []
668 to_drop = []
667 for f in changedfiles:
669 for f in changedfiles:
668 if f in allfiles:
670 if f in allfiles:
669 to_lookup.append(f)
671 to_lookup.append(f)
670 else:
672 else:
671 to_drop.append(f)
673 to_drop.append(f)
672 else:
674 else:
673 changedfilesset = set(changedfiles)
675 changedfilesset = set(changedfiles)
674 to_lookup = changedfilesset & set(allfiles)
676 to_lookup = changedfilesset & set(allfiles)
675 to_drop = changedfilesset - to_lookup
677 to_drop = changedfilesset - to_lookup
676
678
677 if self._origpl is None:
679 if self._origpl is None:
678 self._origpl = self._pl
680 self._origpl = self._pl
679 self._map.setparents(parent, self._nodeconstants.nullid)
681 self._map.setparents(parent, self._nodeconstants.nullid)
680
682
681 for f in to_lookup:
683 for f in to_lookup:
682 self.normallookup(f)
684 self.normallookup(f)
683 for f in to_drop:
685 for f in to_drop:
684 self.drop(f)
686 self.drop(f)
685
687
686 self._dirty = True
688 self._dirty = True
687
689
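The `len(changedfiles) < 10` branch is purely an optimization: for a handful of changed files, membership tests against `allfiles` beat materializing it as a set. Both branches produce the same split, e.g. with hypothetical values:

    allfiles     = [b'a', b'b', b'c']
    changedfiles = [b'b', b'd']
    # to_lookup == [b'b']   still present in the target revision -> normallookup()
    # to_drop   == [b'd']   no longer present                    -> drop()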
688 def identity(self):
690 def identity(self):
689 """Return identity of dirstate itself to detect changing in storage
691 """Return identity of dirstate itself to detect changing in storage
690
692
691 If identity of previous dirstate is equal to this, writing
693 If identity of previous dirstate is equal to this, writing
692 changes based on the former dirstate out can keep consistency.
694 changes based on the former dirstate out can keep consistency.
693 """
695 """
694 return self._map.identity
696 return self._map.identity
695
697
696 def write(self, tr):
698 def write(self, tr):
697 if not self._dirty:
699 if not self._dirty:
698 return
700 return
699
701
700 filename = self._filename
702 filename = self._filename
701 if tr:
703 if tr:
702 # 'dirstate.write()' is not only for writing in-memory
704 # 'dirstate.write()' is not only for writing in-memory
703 # changes out, but also for dropping ambiguous timestamp.
705 # changes out, but also for dropping ambiguous timestamp.
704 # delayed writing would re-raise the "ambiguous timestamp" issue.
706 # delayed writing would re-raise the "ambiguous timestamp" issue.
705 # See also the wiki page below for detail:
707 # See also the wiki page below for detail:
706 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
708 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
707
709
708 # emulate dropping timestamp in 'parsers.pack_dirstate'
710 # emulate dropping timestamp in 'parsers.pack_dirstate'
709 now = _getfsnow(self._opener)
711 now = _getfsnow(self._opener)
710 self._map.clearambiguoustimes(self._updatedfiles, now)
712 self._map.clearambiguoustimes(self._updatedfiles, now)
711
713
712 # emulate that all 'dirstate.normal' results are written out
714 # emulate that all 'dirstate.normal' results are written out
713 self._lastnormaltime = 0
715 self._lastnormaltime = 0
714 self._updatedfiles.clear()
716 self._updatedfiles.clear()
715
717
716 # delay writing in-memory changes out
718 # delay writing in-memory changes out
717 tr.addfilegenerator(
719 tr.addfilegenerator(
718 b'dirstate',
720 b'dirstate',
719 (self._filename,),
721 (self._filename,),
720 self._writedirstate,
722 self._writedirstate,
721 location=b'plain',
723 location=b'plain',
722 )
724 )
723 return
725 return
724
726
725 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
727 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
726 self._writedirstate(st)
728 self._writedirstate(st)
727
729
728 def addparentchangecallback(self, category, callback):
730 def addparentchangecallback(self, category, callback):
729 """add a callback to be called when the wd parents are changed
731 """add a callback to be called when the wd parents are changed
730
732
731 Callback will be called with the following arguments:
733 Callback will be called with the following arguments:
732 dirstate, (oldp1, oldp2), (newp1, newp2)
734 dirstate, (oldp1, oldp2), (newp1, newp2)
733
735
734 Category is a unique identifier to allow overwriting an old callback
736 Category is a unique identifier to allow overwriting an old callback
735 with a newer callback.
737 with a newer callback.
736 """
738 """
737 self._plchangecallbacks[category] = callback
739 self._plchangecallbacks[category] = callback
738
740
739 def _writedirstate(self, st):
741 def _writedirstate(self, st):
740 # notify callbacks about parents change
742 # notify callbacks about parents change
741 if self._origpl is not None and self._origpl != self._pl:
743 if self._origpl is not None and self._origpl != self._pl:
742 for c, callback in sorted(
744 for c, callback in sorted(
743 pycompat.iteritems(self._plchangecallbacks)
745 pycompat.iteritems(self._plchangecallbacks)
744 ):
746 ):
745 callback(self, self._origpl, self._pl)
747 callback(self, self._origpl, self._pl)
746 self._origpl = None
748 self._origpl = None
747 # use the modification time of the newly created temporary file as the
749 # use the modification time of the newly created temporary file as the
748 # filesystem's notion of 'now'
750 # filesystem's notion of 'now'
749 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
751 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
750
752
751 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
753 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
752 # the timestamp of each entry in the dirstate, because of 'now > mtime'
754 # the timestamp of each entry in the dirstate, because of 'now > mtime'
753 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
755 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
754 if delaywrite > 0:
756 if delaywrite > 0:
755 # do we have any files to delay for?
757 # do we have any files to delay for?
756 for f, e in pycompat.iteritems(self._map):
758 for f, e in pycompat.iteritems(self._map):
757 if e.state == b'n' and e[3] == now:
759 if e.state == b'n' and e[3] == now:
758 import time # to avoid useless import
760 import time # to avoid useless import
759
761
760 # rather than sleep n seconds, sleep until the next
762 # rather than sleep n seconds, sleep until the next
761 # multiple of n seconds
763 # multiple of n seconds
762 clock = time.time()
764 clock = time.time()
763 start = int(clock) - (int(clock) % delaywrite)
765 start = int(clock) - (int(clock) % delaywrite)
764 end = start + delaywrite
766 end = start + delaywrite
765 time.sleep(end - clock)
767 time.sleep(end - clock)
766 now = end # trust our estimate that the end is near now
768 now = end # trust our estimate that the end is near now
767 break
769 break
768
770
769 self._map.write(st, now)
771 self._map.write(st, now)
770 self._lastnormaltime = 0
772 self._lastnormaltime = 0
771 self._dirty = False
773 self._dirty = False
772
774
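A worked example of the delay computation above, assuming debug.dirstate.delaywrite=2 and time.time() returning 13.7:

    clock = 13.7
    start = int(clock) - (int(clock) % 2)  # 13 - 1 = 12
    end = start + 2                        # 14
    # sleep(end - clock) waits 0.3s, i.e. until the next multiple of 2 seconds;
    # 'now = end' is then strictly greater than any mtime that equalled the old
    # 'now', so those entries' timestamps no longer need to be dropped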
773 def _dirignore(self, f):
775 def _dirignore(self, f):
774 if self._ignore(f):
776 if self._ignore(f):
775 return True
777 return True
776 for p in pathutil.finddirs(f):
778 for p in pathutil.finddirs(f):
777 if self._ignore(p):
779 if self._ignore(p):
778 return True
780 return True
779 return False
781 return False
780
782
781 def _ignorefiles(self):
783 def _ignorefiles(self):
782 files = []
784 files = []
783 if os.path.exists(self._join(b'.hgignore')):
785 if os.path.exists(self._join(b'.hgignore')):
784 files.append(self._join(b'.hgignore'))
786 files.append(self._join(b'.hgignore'))
785 for name, path in self._ui.configitems(b"ui"):
787 for name, path in self._ui.configitems(b"ui"):
786 if name == b'ignore' or name.startswith(b'ignore.'):
788 if name == b'ignore' or name.startswith(b'ignore.'):
787 # we need to use os.path.join here rather than self._join
789 # we need to use os.path.join here rather than self._join
788 # because path is arbitrary and user-specified
790 # because path is arbitrary and user-specified
789 files.append(os.path.join(self._rootdir, util.expandpath(path)))
791 files.append(os.path.join(self._rootdir, util.expandpath(path)))
790 return files
792 return files
791
793
792 def _ignorefileandline(self, f):
794 def _ignorefileandline(self, f):
793 files = collections.deque(self._ignorefiles())
795 files = collections.deque(self._ignorefiles())
794 visited = set()
796 visited = set()
795 while files:
797 while files:
796 i = files.popleft()
798 i = files.popleft()
797 patterns = matchmod.readpatternfile(
799 patterns = matchmod.readpatternfile(
798 i, self._ui.warn, sourceinfo=True
800 i, self._ui.warn, sourceinfo=True
799 )
801 )
800 for pattern, lineno, line in patterns:
802 for pattern, lineno, line in patterns:
801 kind, p = matchmod._patsplit(pattern, b'glob')
803 kind, p = matchmod._patsplit(pattern, b'glob')
802 if kind == b"subinclude":
804 if kind == b"subinclude":
803 if p not in visited:
805 if p not in visited:
804 files.append(p)
806 files.append(p)
805 continue
807 continue
806 m = matchmod.match(
808 m = matchmod.match(
807 self._root, b'', [], [pattern], warn=self._ui.warn
809 self._root, b'', [], [pattern], warn=self._ui.warn
808 )
810 )
809 if m(f):
811 if m(f):
810 return (i, lineno, line)
812 return (i, lineno, line)
811 visited.add(i)
813 visited.add(i)
812 return (None, -1, b"")
814 return (None, -1, b"")
813
815
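The triple returned here pinpoints which ignore file and which line ignore a given path, or (None, -1, b'') when nothing matches. A sketch of a caller, with a hypothetical path:

    ignorefile, lineno, line = repo.dirstate._ignorefileandline(b'build/output.o')
    # e.g. (b'/repo/.hgignore', 3, b'build/')  when line 3 of .hgignore matches,
    # or   (None, -1, b'')                     when the path is not ignored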
814 def _walkexplicit(self, match, subrepos):
816 def _walkexplicit(self, match, subrepos):
815 """Get stat data about the files explicitly specified by match.
817 """Get stat data about the files explicitly specified by match.
816
818
817 Return a triple (results, dirsfound, dirsnotfound).
819 Return a triple (results, dirsfound, dirsnotfound).
818 - results is a mapping from filename to stat result. It also contains
820 - results is a mapping from filename to stat result. It also contains
819 listings mapping subrepos and .hg to None.
821 listings mapping subrepos and .hg to None.
820 - dirsfound is a list of files found to be directories.
822 - dirsfound is a list of files found to be directories.
821 - dirsnotfound is a list of files that the dirstate thinks are
823 - dirsnotfound is a list of files that the dirstate thinks are
822 directories and that were not found."""
824 directories and that were not found."""
823
825
824 def badtype(mode):
826 def badtype(mode):
825 kind = _(b'unknown')
827 kind = _(b'unknown')
826 if stat.S_ISCHR(mode):
828 if stat.S_ISCHR(mode):
827 kind = _(b'character device')
829 kind = _(b'character device')
828 elif stat.S_ISBLK(mode):
830 elif stat.S_ISBLK(mode):
829 kind = _(b'block device')
831 kind = _(b'block device')
830 elif stat.S_ISFIFO(mode):
832 elif stat.S_ISFIFO(mode):
831 kind = _(b'fifo')
833 kind = _(b'fifo')
832 elif stat.S_ISSOCK(mode):
834 elif stat.S_ISSOCK(mode):
833 kind = _(b'socket')
835 kind = _(b'socket')
834 elif stat.S_ISDIR(mode):
836 elif stat.S_ISDIR(mode):
835 kind = _(b'directory')
837 kind = _(b'directory')
836 return _(b'unsupported file type (type is %s)') % kind
838 return _(b'unsupported file type (type is %s)') % kind
837
839
838 badfn = match.bad
840 badfn = match.bad
839 dmap = self._map
841 dmap = self._map
840 lstat = os.lstat
842 lstat = os.lstat
841 getkind = stat.S_IFMT
843 getkind = stat.S_IFMT
842 dirkind = stat.S_IFDIR
844 dirkind = stat.S_IFDIR
843 regkind = stat.S_IFREG
845 regkind = stat.S_IFREG
844 lnkkind = stat.S_IFLNK
846 lnkkind = stat.S_IFLNK
845 join = self._join
847 join = self._join
846 dirsfound = []
848 dirsfound = []
847 foundadd = dirsfound.append
849 foundadd = dirsfound.append
848 dirsnotfound = []
850 dirsnotfound = []
849 notfoundadd = dirsnotfound.append
851 notfoundadd = dirsnotfound.append
850
852
851 if not match.isexact() and self._checkcase:
853 if not match.isexact() and self._checkcase:
852 normalize = self._normalize
854 normalize = self._normalize
853 else:
855 else:
854 normalize = None
856 normalize = None
855
857
856 files = sorted(match.files())
858 files = sorted(match.files())
857 subrepos.sort()
859 subrepos.sort()
858 i, j = 0, 0
860 i, j = 0, 0
859 while i < len(files) and j < len(subrepos):
861 while i < len(files) and j < len(subrepos):
860 subpath = subrepos[j] + b"/"
862 subpath = subrepos[j] + b"/"
861 if files[i] < subpath:
863 if files[i] < subpath:
862 i += 1
864 i += 1
863 continue
865 continue
864 while i < len(files) and files[i].startswith(subpath):
866 while i < len(files) and files[i].startswith(subpath):
865 del files[i]
867 del files[i]
866 j += 1
868 j += 1
867
869
868 if not files or b'' in files:
870 if not files or b'' in files:
869 files = [b'']
871 files = [b'']
870 # constructing the foldmap is expensive, so don't do it for the
872 # constructing the foldmap is expensive, so don't do it for the
871 # common case where files is ['']
873 # common case where files is ['']
872 normalize = None
874 normalize = None
873 results = dict.fromkeys(subrepos)
875 results = dict.fromkeys(subrepos)
874 results[b'.hg'] = None
876 results[b'.hg'] = None
875
877
876 for ff in files:
878 for ff in files:
877 if normalize:
879 if normalize:
878 nf = normalize(ff, False, True)
880 nf = normalize(ff, False, True)
879 else:
881 else:
880 nf = ff
882 nf = ff
881 if nf in results:
883 if nf in results:
882 continue
884 continue
883
885
884 try:
886 try:
885 st = lstat(join(nf))
887 st = lstat(join(nf))
886 kind = getkind(st.st_mode)
888 kind = getkind(st.st_mode)
887 if kind == dirkind:
889 if kind == dirkind:
888 if nf in dmap:
890 if nf in dmap:
889 # file replaced by dir on disk but still in dirstate
891 # file replaced by dir on disk but still in dirstate
890 results[nf] = None
892 results[nf] = None
891 foundadd((nf, ff))
893 foundadd((nf, ff))
892 elif kind == regkind or kind == lnkkind:
894 elif kind == regkind or kind == lnkkind:
893 results[nf] = st
895 results[nf] = st
894 else:
896 else:
895 badfn(ff, badtype(kind))
897 badfn(ff, badtype(kind))
896 if nf in dmap:
898 if nf in dmap:
897 results[nf] = None
899 results[nf] = None
898 except OSError as inst: # nf not found on disk - it is dirstate only
900 except OSError as inst: # nf not found on disk - it is dirstate only
899 if nf in dmap: # does it exactly match a missing file?
901 if nf in dmap: # does it exactly match a missing file?
900 results[nf] = None
902 results[nf] = None
901 else: # does it match a missing directory?
903 else: # does it match a missing directory?
902 if self._map.hasdir(nf):
904 if self._map.hasdir(nf):
903 notfoundadd(nf)
905 notfoundadd(nf)
904 else:
906 else:
905 badfn(ff, encoding.strtolocal(inst.strerror))
907 badfn(ff, encoding.strtolocal(inst.strerror))
906
908
907 # match.files() may contain explicitly-specified paths that shouldn't
909 # match.files() may contain explicitly-specified paths that shouldn't
908 # be taken; drop them from the list of files found. dirsfound/notfound
910 # be taken; drop them from the list of files found. dirsfound/notfound
909 # aren't filtered here because they will be tested later.
911 # aren't filtered here because they will be tested later.
910 if match.anypats():
912 if match.anypats():
911 for f in list(results):
913 for f in list(results):
912 if f == b'.hg' or f in subrepos:
914 if f == b'.hg' or f in subrepos:
913 # keep sentinel to disable further out-of-repo walks
915 # keep sentinel to disable further out-of-repo walks
914 continue
916 continue
915 if not match(f):
917 if not match(f):
916 del results[f]
918 del results[f]
917
919
918 # Case insensitive filesystems cannot rely on lstat() failing to detect
920 # Case insensitive filesystems cannot rely on lstat() failing to detect
919 # a case-only rename. Prune the stat object for any file that does not
921 # a case-only rename. Prune the stat object for any file that does not
920 # match the case in the filesystem, if there are multiple files that
922 # match the case in the filesystem, if there are multiple files that
921 # normalize to the same path.
923 # normalize to the same path.
922 if match.isexact() and self._checkcase:
924 if match.isexact() and self._checkcase:
923 normed = {}
925 normed = {}
924
926
925 for f, st in pycompat.iteritems(results):
927 for f, st in pycompat.iteritems(results):
926 if st is None:
928 if st is None:
927 continue
929 continue
928
930
929 nc = util.normcase(f)
931 nc = util.normcase(f)
930 paths = normed.get(nc)
932 paths = normed.get(nc)
931
933
932 if paths is None:
934 if paths is None:
933 paths = set()
935 paths = set()
934 normed[nc] = paths
936 normed[nc] = paths
935
937
936 paths.add(f)
938 paths.add(f)
937
939
938 for norm, paths in pycompat.iteritems(normed):
940 for norm, paths in pycompat.iteritems(normed):
939 if len(paths) > 1:
941 if len(paths) > 1:
940 for path in paths:
942 for path in paths:
941 folded = self._discoverpath(
943 folded = self._discoverpath(
942 path, norm, True, None, self._map.dirfoldmap
944 path, norm, True, None, self._map.dirfoldmap
943 )
945 )
944 if path != folded:
946 if path != folded:
945 results[path] = None
947 results[path] = None
946
948
947 return results, dirsfound, dirsnotfound
949 return results, dirsfound, dirsnotfound
948
950
949 def walk(self, match, subrepos, unknown, ignored, full=True):
951 def walk(self, match, subrepos, unknown, ignored, full=True):
950 """
952 """
951 Walk recursively through the directory tree, finding all files
953 Walk recursively through the directory tree, finding all files
952 matched by match.
954 matched by match.
953
955
954 If full is False, maybe skip some known-clean files.
956 If full is False, maybe skip some known-clean files.
955
957
956 Return a dict mapping filename to stat-like object (either
958 Return a dict mapping filename to stat-like object (either
957 mercurial.osutil.stat instance or return value of os.stat()).
959 mercurial.osutil.stat instance or return value of os.stat()).
958
960
959 """
961 """
960 # full is a flag that extensions that hook into walk can use -- this
962 # full is a flag that extensions that hook into walk can use -- this
961 # implementation doesn't use it at all. This satisfies the contract
963 # implementation doesn't use it at all. This satisfies the contract
962 # because we only guarantee a "maybe".
964 # because we only guarantee a "maybe".
963
965
964 if ignored:
966 if ignored:
965 ignore = util.never
967 ignore = util.never
966 dirignore = util.never
968 dirignore = util.never
967 elif unknown:
969 elif unknown:
968 ignore = self._ignore
970 ignore = self._ignore
969 dirignore = self._dirignore
971 dirignore = self._dirignore
970 else:
972 else:
971 # if not unknown and not ignored, drop dir recursion and step 2
973 # if not unknown and not ignored, drop dir recursion and step 2
972 ignore = util.always
974 ignore = util.always
973 dirignore = util.always
975 dirignore = util.always
974
976
975 matchfn = match.matchfn
977 matchfn = match.matchfn
976 matchalways = match.always()
978 matchalways = match.always()
977 matchtdir = match.traversedir
979 matchtdir = match.traversedir
978 dmap = self._map
980 dmap = self._map
979 listdir = util.listdir
981 listdir = util.listdir
980 lstat = os.lstat
982 lstat = os.lstat
981 dirkind = stat.S_IFDIR
983 dirkind = stat.S_IFDIR
982 regkind = stat.S_IFREG
984 regkind = stat.S_IFREG
983 lnkkind = stat.S_IFLNK
985 lnkkind = stat.S_IFLNK
984 join = self._join
986 join = self._join
985
987
986 exact = skipstep3 = False
988 exact = skipstep3 = False
987 if match.isexact(): # match.exact
989 if match.isexact(): # match.exact
988 exact = True
990 exact = True
989 dirignore = util.always # skip step 2
991 dirignore = util.always # skip step 2
990 elif match.prefix(): # match.match, no patterns
992 elif match.prefix(): # match.match, no patterns
991 skipstep3 = True
993 skipstep3 = True
992
994
993 if not exact and self._checkcase:
995 if not exact and self._checkcase:
994 normalize = self._normalize
996 normalize = self._normalize
995 normalizefile = self._normalizefile
997 normalizefile = self._normalizefile
996 skipstep3 = False
998 skipstep3 = False
997 else:
999 else:
998 normalize = self._normalize
1000 normalize = self._normalize
999 normalizefile = None
1001 normalizefile = None
1000
1002
1001 # step 1: find all explicit files
1003 # step 1: find all explicit files
1002 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1004 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1003 if matchtdir:
1005 if matchtdir:
1004 for d in work:
1006 for d in work:
1005 matchtdir(d[0])
1007 matchtdir(d[0])
1006 for d in dirsnotfound:
1008 for d in dirsnotfound:
1007 matchtdir(d)
1009 matchtdir(d)
1008
1010
1009 skipstep3 = skipstep3 and not (work or dirsnotfound)
1011 skipstep3 = skipstep3 and not (work or dirsnotfound)
1010 work = [d for d in work if not dirignore(d[0])]
1012 work = [d for d in work if not dirignore(d[0])]
1011
1013
1012 # step 2: visit subdirectories
1014 # step 2: visit subdirectories
1013 def traverse(work, alreadynormed):
1015 def traverse(work, alreadynormed):
1014 wadd = work.append
1016 wadd = work.append
1015 while work:
1017 while work:
1016 tracing.counter('dirstate.walk work', len(work))
1018 tracing.counter('dirstate.walk work', len(work))
1017 nd = work.pop()
1019 nd = work.pop()
1018 visitentries = match.visitchildrenset(nd)
1020 visitentries = match.visitchildrenset(nd)
1019 if not visitentries:
1021 if not visitentries:
1020 continue
1022 continue
1021 if visitentries == b'this' or visitentries == b'all':
1023 if visitentries == b'this' or visitentries == b'all':
1022 visitentries = None
1024 visitentries = None
1023 skip = None
1025 skip = None
1024 if nd != b'':
1026 if nd != b'':
1025 skip = b'.hg'
1027 skip = b'.hg'
1026 try:
1028 try:
1027 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1029 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1028 entries = listdir(join(nd), stat=True, skip=skip)
1030 entries = listdir(join(nd), stat=True, skip=skip)
1029 except OSError as inst:
1031 except OSError as inst:
1030 if inst.errno in (errno.EACCES, errno.ENOENT):
1032 if inst.errno in (errno.EACCES, errno.ENOENT):
1031 match.bad(
1033 match.bad(
1032 self.pathto(nd), encoding.strtolocal(inst.strerror)
1034 self.pathto(nd), encoding.strtolocal(inst.strerror)
1033 )
1035 )
1034 continue
1036 continue
1035 raise
1037 raise
1036 for f, kind, st in entries:
1038 for f, kind, st in entries:
1037 # Some matchers may return files in the visitentries set,
1039 # Some matchers may return files in the visitentries set,
1038 # instead of 'this', if the matcher explicitly mentions them
1040 # instead of 'this', if the matcher explicitly mentions them
1039 # and is not an exactmatcher. This is acceptable; we do not
1041 # and is not an exactmatcher. This is acceptable; we do not
1040 # make any hard assumptions about file-or-directory below
1042 # make any hard assumptions about file-or-directory below
1041 # based on the presence of `f` in visitentries. If
1043 # based on the presence of `f` in visitentries. If
1042 # visitchildrenset returned a set, we can always skip the
1044 # visitchildrenset returned a set, we can always skip the
1043 # entries *not* in the set it provided regardless of whether
1045 # entries *not* in the set it provided regardless of whether
1044 # they're actually a file or a directory.
1046 # they're actually a file or a directory.
1045 if visitentries and f not in visitentries:
1047 if visitentries and f not in visitentries:
1046 continue
1048 continue
1047 if normalizefile:
1049 if normalizefile:
1048 # even though f might be a directory, we're only
1050 # even though f might be a directory, we're only
1049 # interested in comparing it to files currently in the
1051 # interested in comparing it to files currently in the
1050 # dmap -- therefore normalizefile is enough
1052 # dmap -- therefore normalizefile is enough
1051 nf = normalizefile(
1053 nf = normalizefile(
1052 nd and (nd + b"/" + f) or f, True, True
1054 nd and (nd + b"/" + f) or f, True, True
1053 )
1055 )
1054 else:
1056 else:
1055 nf = nd and (nd + b"/" + f) or f
1057 nf = nd and (nd + b"/" + f) or f
1056 if nf not in results:
1058 if nf not in results:
1057 if kind == dirkind:
1059 if kind == dirkind:
1058 if not ignore(nf):
1060 if not ignore(nf):
1059 if matchtdir:
1061 if matchtdir:
1060 matchtdir(nf)
1062 matchtdir(nf)
1061 wadd(nf)
1063 wadd(nf)
1062 if nf in dmap and (matchalways or matchfn(nf)):
1064 if nf in dmap and (matchalways or matchfn(nf)):
1063 results[nf] = None
1065 results[nf] = None
1064 elif kind == regkind or kind == lnkkind:
1066 elif kind == regkind or kind == lnkkind:
1065 if nf in dmap:
1067 if nf in dmap:
1066 if matchalways or matchfn(nf):
1068 if matchalways or matchfn(nf):
1067 results[nf] = st
1069 results[nf] = st
1068 elif (matchalways or matchfn(nf)) and not ignore(
1070 elif (matchalways or matchfn(nf)) and not ignore(
1069 nf
1071 nf
1070 ):
1072 ):
1071 # unknown file -- normalize if necessary
1073 # unknown file -- normalize if necessary
1072 if not alreadynormed:
1074 if not alreadynormed:
1073 nf = normalize(nf, False, True)
1075 nf = normalize(nf, False, True)
1074 results[nf] = st
1076 results[nf] = st
1075 elif nf in dmap and (matchalways or matchfn(nf)):
1077 elif nf in dmap and (matchalways or matchfn(nf)):
1076 results[nf] = None
1078 results[nf] = None
1077
1079
1078 for nd, d in work:
1080 for nd, d in work:
1079 # alreadynormed means that processwork doesn't have to do any
1081 # alreadynormed means that processwork doesn't have to do any
1080 # expensive directory normalization
1082 # expensive directory normalization
1081 alreadynormed = not normalize or nd == d
1083 alreadynormed = not normalize or nd == d
1082 traverse([d], alreadynormed)
1084 traverse([d], alreadynormed)
1083
1085
1084 for s in subrepos:
1086 for s in subrepos:
1085 del results[s]
1087 del results[s]
1086 del results[b'.hg']
1088 del results[b'.hg']
1087
1089
1088 # step 3: visit remaining files from dmap
1090 # step 3: visit remaining files from dmap
1089 if not skipstep3 and not exact:
1091 if not skipstep3 and not exact:
1090 # If a dmap file is not in results yet, it was either
1092 # If a dmap file is not in results yet, it was either
1091 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1093 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1092 # symlink directory.
1094 # symlink directory.
1093 if not results and matchalways:
1095 if not results and matchalways:
1094 visit = [f for f in dmap]
1096 visit = [f for f in dmap]
1095 else:
1097 else:
1096 visit = [f for f in dmap if f not in results and matchfn(f)]
1098 visit = [f for f in dmap if f not in results and matchfn(f)]
1097 visit.sort()
1099 visit.sort()
1098
1100
1099 if unknown:
1101 if unknown:
1100 # unknown == True means we walked all dirs under the roots
1102 # unknown == True means we walked all dirs under the roots
1101 # that weren't ignored, and everything that matched was stat'ed
1103 # that weren't ignored, and everything that matched was stat'ed
1102 # and is already in results.
1104 # and is already in results.
1103 # The rest must thus be ignored or under a symlink.
1105 # The rest must thus be ignored or under a symlink.
1104 audit_path = pathutil.pathauditor(self._root, cached=True)
1106 audit_path = pathutil.pathauditor(self._root, cached=True)
1105
1107
1106 for nf in iter(visit):
1108 for nf in iter(visit):
1107 # If a stat for the same file was already added with a
1109 # If a stat for the same file was already added with a
1108 # different case, don't add one for this, since that would
1110 # different case, don't add one for this, since that would
1109 # make it appear as if the file exists under both names
1111 # make it appear as if the file exists under both names
1110 # on disk.
1112 # on disk.
1111 if (
1113 if (
1112 normalizefile
1114 normalizefile
1113 and normalizefile(nf, True, True) in results
1115 and normalizefile(nf, True, True) in results
1114 ):
1116 ):
1115 results[nf] = None
1117 results[nf] = None
1116 # Report ignored items in the dmap as long as they are not
1118 # Report ignored items in the dmap as long as they are not
1117 # under a symlink directory.
1119 # under a symlink directory.
1118 elif audit_path.check(nf):
1120 elif audit_path.check(nf):
1119 try:
1121 try:
1120 results[nf] = lstat(join(nf))
1122 results[nf] = lstat(join(nf))
1121 # file was just ignored, no links, and exists
1123 # file was just ignored, no links, and exists
1122 except OSError:
1124 except OSError:
1123 # file doesn't exist
1125 # file doesn't exist
1124 results[nf] = None
1126 results[nf] = None
1125 else:
1127 else:
1126 # It's either missing or under a symlink directory
1128 # It's either missing or under a symlink directory
1127 # which we in this case report as missing
1129 # which we in this case report as missing
1128 results[nf] = None
1130 results[nf] = None
1129 else:
1131 else:
1130 # We may not have walked the full directory tree above,
1132 # We may not have walked the full directory tree above,
1131 # so stat and check everything we missed.
1133 # so stat and check everything we missed.
1132 iv = iter(visit)
1134 iv = iter(visit)
1133 for st in util.statfiles([join(i) for i in visit]):
1135 for st in util.statfiles([join(i) for i in visit]):
1134 results[next(iv)] = st
1136 results[next(iv)] = st
1135 return results
1137 return results
1136
1138
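The dict contract of walk() above (filename mapped either to a stat-like object or to None) is easy to mishandle when consuming it. The following standalone sketch does not touch any Mercurial API; it only shows how a caller would typically split such a result into names that were actually stat'ed on disk and names that are merely known to the dirstate. The `results` dict here is hypothetical toy data.

import os

def split_walk_results(results):
    """Split a walk-style dict into (stat'ed names, names mapped to None)."""
    statted = sorted(fn for fn, st in results.items() if st is not None)
    known_only = sorted(fn for fn, st in results.items() if st is None)
    return statted, known_only

# toy stand-in for what walk() might return: one real stat result, one None
results = {b'seen-on-disk.txt': os.stat(os.curdir), b'only-in-dmap.txt': None}
print(split_walk_results(results))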
1137 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1139 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1138 # Force Rayon (Rust parallelism library) to respect the number of
1140 # Force Rayon (Rust parallelism library) to respect the number of
1139 # workers. This is a temporary workaround until Rust code knows
1141 # workers. This is a temporary workaround until Rust code knows
1140 # how to read the config file.
1142 # how to read the config file.
1141 numcpus = self._ui.configint(b"worker", b"numcpus")
1143 numcpus = self._ui.configint(b"worker", b"numcpus")
1142 if numcpus is not None:
1144 if numcpus is not None:
1143 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1145 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1144
1146
1145 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1147 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1146 if not workers_enabled:
1148 if not workers_enabled:
1147 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1149 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1148
1150
1149 (
1151 (
1150 lookup,
1152 lookup,
1151 modified,
1153 modified,
1152 added,
1154 added,
1153 removed,
1155 removed,
1154 deleted,
1156 deleted,
1155 clean,
1157 clean,
1156 ignored,
1158 ignored,
1157 unknown,
1159 unknown,
1158 warnings,
1160 warnings,
1159 bad,
1161 bad,
1160 traversed,
1162 traversed,
1161 dirty,
1163 dirty,
1162 ) = rustmod.status(
1164 ) = rustmod.status(
1163 self._map._rustmap,
1165 self._map._rustmap,
1164 matcher,
1166 matcher,
1165 self._rootdir,
1167 self._rootdir,
1166 self._ignorefiles(),
1168 self._ignorefiles(),
1167 self._checkexec,
1169 self._checkexec,
1168 self._lastnormaltime,
1170 self._lastnormaltime,
1169 bool(list_clean),
1171 bool(list_clean),
1170 bool(list_ignored),
1172 bool(list_ignored),
1171 bool(list_unknown),
1173 bool(list_unknown),
1172 bool(matcher.traversedir),
1174 bool(matcher.traversedir),
1173 )
1175 )
1174
1176
1175 self._dirty |= dirty
1177 self._dirty |= dirty
1176
1178
1177 if matcher.traversedir:
1179 if matcher.traversedir:
1178 for dir in traversed:
1180 for dir in traversed:
1179 matcher.traversedir(dir)
1181 matcher.traversedir(dir)
1180
1182
1181 if self._ui.warn:
1183 if self._ui.warn:
1182 for item in warnings:
1184 for item in warnings:
1183 if isinstance(item, tuple):
1185 if isinstance(item, tuple):
1184 file_path, syntax = item
1186 file_path, syntax = item
1185 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1187 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1186 file_path,
1188 file_path,
1187 syntax,
1189 syntax,
1188 )
1190 )
1189 self._ui.warn(msg)
1191 self._ui.warn(msg)
1190 else:
1192 else:
1191 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1193 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1192 self._ui.warn(
1194 self._ui.warn(
1193 msg
1195 msg
1194 % (
1196 % (
1195 pathutil.canonpath(
1197 pathutil.canonpath(
1196 self._rootdir, self._rootdir, item
1198 self._rootdir, self._rootdir, item
1197 ),
1199 ),
1198 b"No such file or directory",
1200 b"No such file or directory",
1199 )
1201 )
1200 )
1202 )
1201
1203
1202 for (fn, message) in bad:
1204 for (fn, message) in bad:
1203 matcher.bad(fn, encoding.strtolocal(message))
1205 matcher.bad(fn, encoding.strtolocal(message))
1204
1206
1205 status = scmutil.status(
1207 status = scmutil.status(
1206 modified=modified,
1208 modified=modified,
1207 added=added,
1209 added=added,
1208 removed=removed,
1210 removed=removed,
1209 deleted=deleted,
1211 deleted=deleted,
1210 unknown=unknown,
1212 unknown=unknown,
1211 ignored=ignored,
1213 ignored=ignored,
1212 clean=clean,
1214 clean=clean,
1213 )
1215 )
1214 return (lookup, status)
1216 return (lookup, status)
1215
1217
1216 def status(self, match, subrepos, ignored, clean, unknown):
1218 def status(self, match, subrepos, ignored, clean, unknown):
1217 """Determine the status of the working copy relative to the
1219 """Determine the status of the working copy relative to the
1218 dirstate and return a pair of (unsure, status), where status is of type
1220 dirstate and return a pair of (unsure, status), where status is of type
1219 scmutil.status and:
1221 scmutil.status and:
1220
1222
1221 unsure:
1223 unsure:
1222 files that might have been modified since the dirstate was
1224 files that might have been modified since the dirstate was
1223 written, but need to be read to be sure (size is the same
1225 written, but need to be read to be sure (size is the same
1224 but mtime differs)
1226 but mtime differs)
1225 status.modified:
1227 status.modified:
1226 files that have definitely been modified since the dirstate
1228 files that have definitely been modified since the dirstate
1227 was written (different size or mode)
1229 was written (different size or mode)
1228 status.clean:
1230 status.clean:
1229 files that have definitely not been modified since the
1231 files that have definitely not been modified since the
1230 dirstate was written
1232 dirstate was written
1231 """
1233 """
1232 listignored, listclean, listunknown = ignored, clean, unknown
1234 listignored, listclean, listunknown = ignored, clean, unknown
1233 lookup, modified, added, unknown, ignored = [], [], [], [], []
1235 lookup, modified, added, unknown, ignored = [], [], [], [], []
1234 removed, deleted, clean = [], [], []
1236 removed, deleted, clean = [], [], []
1235
1237
1236 dmap = self._map
1238 dmap = self._map
1237 dmap.preload()
1239 dmap.preload()
1238
1240
1239 use_rust = True
1241 use_rust = True
1240
1242
1241 allowed_matchers = (
1243 allowed_matchers = (
1242 matchmod.alwaysmatcher,
1244 matchmod.alwaysmatcher,
1243 matchmod.exactmatcher,
1245 matchmod.exactmatcher,
1244 matchmod.includematcher,
1246 matchmod.includematcher,
1245 )
1247 )
1246
1248
1247 if rustmod is None:
1249 if rustmod is None:
1248 use_rust = False
1250 use_rust = False
1249 elif self._checkcase:
1251 elif self._checkcase:
1250 # Case-insensitive filesystems are not handled yet
1252 # Case-insensitive filesystems are not handled yet
1251 use_rust = False
1253 use_rust = False
1252 elif subrepos:
1254 elif subrepos:
1253 use_rust = False
1255 use_rust = False
1254 elif sparse.enabled:
1256 elif sparse.enabled:
1255 use_rust = False
1257 use_rust = False
1256 elif not isinstance(match, allowed_matchers):
1258 elif not isinstance(match, allowed_matchers):
1257 # Some matchers have yet to be implemented
1259 # Some matchers have yet to be implemented
1258 use_rust = False
1260 use_rust = False
1259
1261
1260 if use_rust:
1262 if use_rust:
1261 try:
1263 try:
1262 return self._rust_status(
1264 return self._rust_status(
1263 match, listclean, listignored, listunknown
1265 match, listclean, listignored, listunknown
1264 )
1266 )
1265 except rustmod.FallbackError:
1267 except rustmod.FallbackError:
1266 pass
1268 pass
1267
1269
1268 def noop(f):
1270 def noop(f):
1269 pass
1271 pass
1270
1272
1271 dcontains = dmap.__contains__
1273 dcontains = dmap.__contains__
1272 dget = dmap.__getitem__
1274 dget = dmap.__getitem__
1273 ladd = lookup.append # aka "unsure"
1275 ladd = lookup.append # aka "unsure"
1274 madd = modified.append
1276 madd = modified.append
1275 aadd = added.append
1277 aadd = added.append
1276 uadd = unknown.append if listunknown else noop
1278 uadd = unknown.append if listunknown else noop
1277 iadd = ignored.append if listignored else noop
1279 iadd = ignored.append if listignored else noop
1278 radd = removed.append
1280 radd = removed.append
1279 dadd = deleted.append
1281 dadd = deleted.append
1280 cadd = clean.append if listclean else noop
1282 cadd = clean.append if listclean else noop
1281 mexact = match.exact
1283 mexact = match.exact
1282 dirignore = self._dirignore
1284 dirignore = self._dirignore
1283 checkexec = self._checkexec
1285 checkexec = self._checkexec
1284 copymap = self._map.copymap
1286 copymap = self._map.copymap
1285 lastnormaltime = self._lastnormaltime
1287 lastnormaltime = self._lastnormaltime
1286
1288
1287 # We need to do full walks when either
1289 # We need to do full walks when either
1288 # - we're listing all clean files, or
1290 # - we're listing all clean files, or
1289 # - match.traversedir does something, because match.traversedir should
1291 # - match.traversedir does something, because match.traversedir should
1290 # be called for every dir in the working dir
1292 # be called for every dir in the working dir
1291 full = listclean or match.traversedir is not None
1293 full = listclean or match.traversedir is not None
1292 for fn, st in pycompat.iteritems(
1294 for fn, st in pycompat.iteritems(
1293 self.walk(match, subrepos, listunknown, listignored, full=full)
1295 self.walk(match, subrepos, listunknown, listignored, full=full)
1294 ):
1296 ):
1295 if not dcontains(fn):
1297 if not dcontains(fn):
1296 if (listignored or mexact(fn)) and dirignore(fn):
1298 if (listignored or mexact(fn)) and dirignore(fn):
1297 if listignored:
1299 if listignored:
1298 iadd(fn)
1300 iadd(fn)
1299 else:
1301 else:
1300 uadd(fn)
1302 uadd(fn)
1301 continue
1303 continue
1302
1304
1303 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1305 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1304 # written like that for performance reasons. dmap[fn] is not a
1306 # written like that for performance reasons. dmap[fn] is not a
1305 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1307 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1306 # opcode has fast paths when the value to be unpacked is a tuple or
1308 # opcode has fast paths when the value to be unpacked is a tuple or
1307 # a list, but falls back to creating a full-fledged iterator in
1309 # a list, but falls back to creating a full-fledged iterator in
1308 # general. That is much slower than simply accessing and storing the
1310 # general. That is much slower than simply accessing and storing the
1309 # tuple members one by one.
1311 # tuple members one by one.
1310 t = dget(fn)
1312 t = dget(fn)
1311 state = t.state
1313 state = t.state
1312 mode = t[1]
1314 mode = t[1]
1313 size = t[2]
1315 size = t[2]
1314 time = t[3]
1316 time = t[3]
1315
1317
1316 if not st and state in b"nma":
1318 if not st and state in b"nma":
1317 dadd(fn)
1319 dadd(fn)
1318 elif state == b'n':
1320 elif state == b'n':
1319 if (
1321 if (
1320 size >= 0
1322 size >= 0
1321 and (
1323 and (
1322 (size != st.st_size and size != st.st_size & _rangemask)
1324 (size != st.st_size and size != st.st_size & _rangemask)
1323 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1325 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1324 )
1326 )
1325 or t.from_p2
1327 or t.from_p2
1326 or fn in copymap
1328 or fn in copymap
1327 ):
1329 ):
1328 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1330 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1329 # issue6456: Size returned may be longer due to
1331 # issue6456: Size returned may be longer due to
1330 # encryption on EXT-4 fscrypt, undecided.
1332 # encryption on EXT-4 fscrypt, undecided.
1331 ladd(fn)
1333 ladd(fn)
1332 else:
1334 else:
1333 madd(fn)
1335 madd(fn)
1334 elif (
1336 elif (
1335 time != st[stat.ST_MTIME]
1337 time != st[stat.ST_MTIME]
1336 and time != st[stat.ST_MTIME] & _rangemask
1338 and time != st[stat.ST_MTIME] & _rangemask
1337 ):
1339 ):
1338 ladd(fn)
1340 ladd(fn)
1339 elif st[stat.ST_MTIME] == lastnormaltime:
1341 elif st[stat.ST_MTIME] == lastnormaltime:
1340 # fn may have just been marked as normal and it may have
1342 # fn may have just been marked as normal and it may have
1341 # changed in the same second without changing its size.
1343 # changed in the same second without changing its size.
1342 # This can happen if we quickly do multiple commits.
1344 # This can happen if we quickly do multiple commits.
1343 # Force lookup, so we don't miss such a racy file change.
1345 # Force lookup, so we don't miss such a racy file change.
1344 ladd(fn)
1346 ladd(fn)
1345 elif listclean:
1347 elif listclean:
1346 cadd(fn)
1348 cadd(fn)
1347 elif t.merged:
1349 elif t.merged:
1348 madd(fn)
1350 madd(fn)
1349 elif t.added:
1351 elif t.added:
1350 aadd(fn)
1352 aadd(fn)
1351 elif t.removed:
1353 elif t.removed:
1352 radd(fn)
1354 radd(fn)
1353 status = scmutil.status(
1355 status = scmutil.status(
1354 modified, added, removed, deleted, unknown, ignored, clean
1356 modified, added, removed, deleted, unknown, ignored, clean
1355 )
1357 )
1356 return (lookup, status)
1358 return (lookup, status)
1357
1359
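As the docstring of status() above says, the first element of the returned pair lists "unsure" files whose size matches the recorded entry but whose mtime differs, so only a content comparison can settle them. The sketch below is not Mercurial code; it just illustrates that contract with plain dictionaries standing in for the working-copy and recorded contents, and hypothetical reader callables.

def resolve_unsure(unsure, read_working, read_recorded):
    """Return the subset of `unsure` whose content actually changed.

    `read_working` / `read_recorded` are hypothetical callables returning
    the on-disk and last-recorded content for a filename.
    """
    return [f for f in unsure if read_working(f) != read_recorded(f)]

# toy data standing in for working-copy and recorded contents
working = {b'a': b'new', b'b': b'same'}
recorded = {b'a': b'old', b'b': b'same'}
print(resolve_unsure([b'a', b'b'], working.__getitem__, recorded.__getitem__))
# -> [b'a']: only 'a' is really modified; 'b' was a false alarm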
1358 def matches(self, match):
1360 def matches(self, match):
1359 """
1361 """
1360 return files in the dirstate (in whatever state) filtered by match
1362 return files in the dirstate (in whatever state) filtered by match
1361 """
1363 """
1362 dmap = self._map
1364 dmap = self._map
1363 if rustmod is not None:
1365 if rustmod is not None:
1364 dmap = self._map._rustmap
1366 dmap = self._map._rustmap
1365
1367
1366 if match.always():
1368 if match.always():
1367 return dmap.keys()
1369 return dmap.keys()
1368 files = match.files()
1370 files = match.files()
1369 if match.isexact():
1371 if match.isexact():
1370 # fast path -- filter the other way around, since typically files is
1372 # fast path -- filter the other way around, since typically files is
1371 # much smaller than dmap
1373 # much smaller than dmap
1372 return [f for f in files if f in dmap]
1374 return [f for f in files if f in dmap]
1373 if match.prefix() and all(fn in dmap for fn in files):
1375 if match.prefix() and all(fn in dmap for fn in files):
1374 # fast path -- all the values are known to be files, so just return
1376 # fast path -- all the values are known to be files, so just return
1375 # that
1377 # that
1376 return list(files)
1378 return list(files)
1377 return [f for f in dmap if match(f)]
1379 return [f for f in dmap if match(f)]
1378
1380
1379 def _actualfilename(self, tr):
1381 def _actualfilename(self, tr):
1380 if tr:
1382 if tr:
1381 return self._pendingfilename
1383 return self._pendingfilename
1382 else:
1384 else:
1383 return self._filename
1385 return self._filename
1384
1386
1385 def savebackup(self, tr, backupname):
1387 def savebackup(self, tr, backupname):
1386 '''Save current dirstate into backup file'''
1388 '''Save current dirstate into backup file'''
1387 filename = self._actualfilename(tr)
1389 filename = self._actualfilename(tr)
1388 assert backupname != filename
1390 assert backupname != filename
1389
1391
1390 # use '_writedirstate' instead of 'write' to make sure changes are written,
1392 # use '_writedirstate' instead of 'write' to make sure changes are written,
1391 # because the latter skips writing while a transaction is running.
1393 # because the latter skips writing while a transaction is running.
1392 # the output file will be used to create a backup of the dirstate at this point.
1394 # the output file will be used to create a backup of the dirstate at this point.
1393 if self._dirty or not self._opener.exists(filename):
1395 if self._dirty or not self._opener.exists(filename):
1394 self._writedirstate(
1396 self._writedirstate(
1395 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1397 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1396 )
1398 )
1397
1399
1398 if tr:
1400 if tr:
1399 # ensure that subsequent tr.writepending returns True for
1401 # ensure that subsequent tr.writepending returns True for
1400 # changes written out above, even if dirstate is never
1402 # changes written out above, even if dirstate is never
1401 # changed after this
1403 # changed after this
1402 tr.addfilegenerator(
1404 tr.addfilegenerator(
1403 b'dirstate',
1405 b'dirstate',
1404 (self._filename,),
1406 (self._filename,),
1405 self._writedirstate,
1407 self._writedirstate,
1406 location=b'plain',
1408 location=b'plain',
1407 )
1409 )
1408
1410
1409 # ensure that pending file written above is unlinked at
1411 # ensure that pending file written above is unlinked at
1410 # failure, even if tr.writepending isn't invoked until the
1412 # failure, even if tr.writepending isn't invoked until the
1411 # end of this transaction
1413 # end of this transaction
1412 tr.registertmp(filename, location=b'plain')
1414 tr.registertmp(filename, location=b'plain')
1413
1415
1414 self._opener.tryunlink(backupname)
1416 self._opener.tryunlink(backupname)
1415 # hardlink backup is okay because _writedirstate is always called
1417 # hardlink backup is okay because _writedirstate is always called
1416 # with an "atomictemp=True" file.
1418 # with an "atomictemp=True" file.
1417 util.copyfile(
1419 util.copyfile(
1418 self._opener.join(filename),
1420 self._opener.join(filename),
1419 self._opener.join(backupname),
1421 self._opener.join(backupname),
1420 hardlink=True,
1422 hardlink=True,
1421 )
1423 )
1422
1424
1423 def restorebackup(self, tr, backupname):
1425 def restorebackup(self, tr, backupname):
1424 '''Restore dirstate by backup file'''
1426 '''Restore dirstate by backup file'''
1425 # this "invalidate()" prevents "wlock.release()" from writing
1427 # this "invalidate()" prevents "wlock.release()" from writing
1426 # changes of dirstate out after restoring from backup file
1428 # changes of dirstate out after restoring from backup file
1427 self.invalidate()
1429 self.invalidate()
1428 filename = self._actualfilename(tr)
1430 filename = self._actualfilename(tr)
1429 o = self._opener
1431 o = self._opener
1430 if util.samefile(o.join(backupname), o.join(filename)):
1432 if util.samefile(o.join(backupname), o.join(filename)):
1431 o.unlink(backupname)
1433 o.unlink(backupname)
1432 else:
1434 else:
1433 o.rename(backupname, filename, checkambig=True)
1435 o.rename(backupname, filename, checkambig=True)
1434
1436
1435 def clearbackup(self, tr, backupname):
1437 def clearbackup(self, tr, backupname):
1436 '''Clear backup file'''
1438 '''Clear backup file'''
1437 self._opener.unlink(backupname)
1439 self._opener.unlink(backupname)
@@ -1,673 +1,683 b''
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 parsers = policy.importmod('parsers')
21 parsers = policy.importmod('parsers')
22 rustmod = policy.importrust('dirstate')
22 rustmod = policy.importrust('dirstate')
23
23
24 propertycache = util.propertycache
24 propertycache = util.propertycache
25
25
26 dirstatetuple = parsers.dirstatetuple
26 dirstatetuple = parsers.dirstatetuple
27
27
28
28
29 # a special value used internally for `size` if the file comes from the other parent
29 # a special value used internally for `size` if the file comes from the other parent
30 FROM_P2 = -2
30 FROM_P2 = -2
31
31
32 # a special value used internally for `size` if the file is modified/merged/added
32 # a special value used internally for `size` if the file is modified/merged/added
33 NONNORMAL = -1
33 NONNORMAL = -1
34
34
35 # a special value used internally for `time` if the time is ambiguous
35 # a special value used internally for `time` if the time is ambiguous
36 AMBIGUOUS_TIME = -1
36 AMBIGUOUS_TIME = -1
37
37
38 rangemask = 0x7FFFFFFF
38 rangemask = 0x7FFFFFFF
39
39
40
40
41 class dirstatemap(object):
41 class dirstatemap(object):
42 """Map encapsulating the dirstate's contents.
42 """Map encapsulating the dirstate's contents.
43
43
44 The dirstate contains the following state:
44 The dirstate contains the following state:
45
45
46 - `identity` is the identity of the dirstate file, which can be used to
46 - `identity` is the identity of the dirstate file, which can be used to
47 detect when changes have occurred to the dirstate file.
47 detect when changes have occurred to the dirstate file.
48
48
49 - `parents` is a pair containing the parents of the working copy. The
49 - `parents` is a pair containing the parents of the working copy. The
50 parents are updated by calling `setparents`.
50 parents are updated by calling `setparents`.
51
51
52 - the state map maps filenames to tuples of (state, mode, size, mtime),
52 - the state map maps filenames to tuples of (state, mode, size, mtime),
53 where state is a single character representing 'normal', 'added',
53 where state is a single character representing 'normal', 'added',
54 'removed', or 'merged'. It is read by treating the dirstate as a
54 'removed', or 'merged'. It is read by treating the dirstate as a
55 dict. File state is updated by calling the `addfile`, `removefile` and
55 dict. File state is updated by calling the `addfile`, `removefile` and
56 `dropfile` methods.
56 `dropfile` methods.
57
57
58 - `copymap` maps destination filenames to their source filename.
58 - `copymap` maps destination filenames to their source filename.
59
59
60 The dirstate also provides the following views onto the state:
60 The dirstate also provides the following views onto the state:
61
61
62 - `nonnormalset` is a set of the filenames that have state other
62 - `nonnormalset` is a set of the filenames that have state other
63 than 'normal', or are normal but have an mtime of -1 ('normallookup').
63 than 'normal', or are normal but have an mtime of -1 ('normallookup').
64
64
65 - `otherparentset` is a set of the filenames that are marked as coming
65 - `otherparentset` is a set of the filenames that are marked as coming
66 from the second parent when the dirstate is currently being merged.
66 from the second parent when the dirstate is currently being merged.
67
67
68 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
68 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
69 form that they appear as in the dirstate.
69 form that they appear as in the dirstate.
70
70
71 - `dirfoldmap` is a dict mapping normalized directory names to the
71 - `dirfoldmap` is a dict mapping normalized directory names to the
72 denormalized form that they appear as in the dirstate.
72 denormalized form that they appear as in the dirstate.
73 """
73 """
74
74
75 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
75 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
76 self._ui = ui
76 self._ui = ui
77 self._opener = opener
77 self._opener = opener
78 self._root = root
78 self._root = root
79 self._filename = b'dirstate'
79 self._filename = b'dirstate'
80 self._nodelen = 20
80 self._nodelen = 20
81 self._nodeconstants = nodeconstants
81 self._nodeconstants = nodeconstants
82 assert (
82 assert (
83 not use_dirstate_v2
83 not use_dirstate_v2
84 ), "should have detected unsupported requirement"
84 ), "should have detected unsupported requirement"
85
85
86 self._parents = None
86 self._parents = None
87 self._dirtyparents = False
87 self._dirtyparents = False
88
88
89 # for consistent view between _pl() and _read() invocations
89 # for consistent view between _pl() and _read() invocations
90 self._pendingmode = None
90 self._pendingmode = None
91
91
92 @propertycache
92 @propertycache
93 def _map(self):
93 def _map(self):
94 self._map = {}
94 self._map = {}
95 self.read()
95 self.read()
96 return self._map
96 return self._map
97
97
98 @propertycache
98 @propertycache
99 def copymap(self):
99 def copymap(self):
100 self.copymap = {}
100 self.copymap = {}
101 self._map
101 self._map
102 return self.copymap
102 return self.copymap
103
103
104 def directories(self):
104 def directories(self):
105 # Rust / dirstate-v2 only
105 # Rust / dirstate-v2 only
106 return []
106 return []
107
107
108 def clear(self):
108 def clear(self):
109 self._map.clear()
109 self._map.clear()
110 self.copymap.clear()
110 self.copymap.clear()
111 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
111 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
112 util.clearcachedproperty(self, b"_dirs")
112 util.clearcachedproperty(self, b"_dirs")
113 util.clearcachedproperty(self, b"_alldirs")
113 util.clearcachedproperty(self, b"_alldirs")
114 util.clearcachedproperty(self, b"filefoldmap")
114 util.clearcachedproperty(self, b"filefoldmap")
115 util.clearcachedproperty(self, b"dirfoldmap")
115 util.clearcachedproperty(self, b"dirfoldmap")
116 util.clearcachedproperty(self, b"nonnormalset")
116 util.clearcachedproperty(self, b"nonnormalset")
117 util.clearcachedproperty(self, b"otherparentset")
117 util.clearcachedproperty(self, b"otherparentset")
118
118
119 def items(self):
119 def items(self):
120 return pycompat.iteritems(self._map)
120 return pycompat.iteritems(self._map)
121
121
122 # forward for python2,3 compat
122 # forward for python2,3 compat
123 iteritems = items
123 iteritems = items
124
124
125 def __len__(self):
125 def __len__(self):
126 return len(self._map)
126 return len(self._map)
127
127
128 def __iter__(self):
128 def __iter__(self):
129 return iter(self._map)
129 return iter(self._map)
130
130
131 def get(self, key, default=None):
131 def get(self, key, default=None):
132 return self._map.get(key, default)
132 return self._map.get(key, default)
133
133
134 def __contains__(self, key):
134 def __contains__(self, key):
135 return key in self._map
135 return key in self._map
136
136
137 def __getitem__(self, key):
137 def __getitem__(self, key):
138 return self._map[key]
138 return self._map[key]
139
139
140 def keys(self):
140 def keys(self):
141 return self._map.keys()
141 return self._map.keys()
142
142
143 def preload(self):
143 def preload(self):
144 """Loads the underlying data, if it's not already loaded"""
144 """Loads the underlying data, if it's not already loaded"""
145 self._map
145 self._map
146
146
147 def addfile(
147 def addfile(
148 self,
148 self,
149 f,
149 f,
150 state=None,
150 state=None,
151 mode=0,
151 mode=0,
152 size=None,
152 size=None,
153 mtime=None,
153 mtime=None,
154 added=False,
154 added=False,
155 merged=False,
155 from_p2=False,
156 from_p2=False,
156 possibly_dirty=False,
157 possibly_dirty=False,
157 ):
158 ):
158 """Add a tracked file to the dirstate."""
159 """Add a tracked file to the dirstate."""
159 if added:
160 if added:
161 assert not merged
160 assert not possibly_dirty
162 assert not possibly_dirty
161 assert not from_p2
163 assert not from_p2
162 state = b'a'
164 state = b'a'
163 size = NONNORMAL
165 size = NONNORMAL
164 mtime = AMBIGUOUS_TIME
166 mtime = AMBIGUOUS_TIME
167 elif merged:
168 assert not possibly_dirty
169 assert not from_p2
170 state = b'm'
171 size = FROM_P2
172 mtime = AMBIGUOUS_TIME
165 elif from_p2:
173 elif from_p2:
166 assert not possibly_dirty
174 assert not possibly_dirty
167 size = FROM_P2
175 size = FROM_P2
168 mtime = AMBIGUOUS_TIME
176 mtime = AMBIGUOUS_TIME
169 elif possibly_dirty:
177 elif possibly_dirty:
170 size = NONNORMAL
178 size = NONNORMAL
171 mtime = AMBIGUOUS_TIME
179 mtime = AMBIGUOUS_TIME
172 else:
180 else:
173 assert state != b'a'
181 assert state != b'a'
174 assert size != FROM_P2
182 assert size != FROM_P2
175 assert size != NONNORMAL
183 assert size != NONNORMAL
176 size = size & rangemask
184 size = size & rangemask
177 mtime = mtime & rangemask
185 mtime = mtime & rangemask
178 assert state is not None
186 assert state is not None
179 assert size is not None
187 assert size is not None
180 assert mtime is not None
188 assert mtime is not None
181 old_entry = self.get(f)
189 old_entry = self.get(f)
182 if (
190 if (
183 old_entry is None or old_entry.removed
191 old_entry is None or old_entry.removed
184 ) and "_dirs" in self.__dict__:
192 ) and "_dirs" in self.__dict__:
185 self._dirs.addpath(f)
193 self._dirs.addpath(f)
186 if old_entry is None and "_alldirs" in self.__dict__:
194 if old_entry is None and "_alldirs" in self.__dict__:
187 self._alldirs.addpath(f)
195 self._alldirs.addpath(f)
188 self._map[f] = dirstatetuple(state, mode, size, mtime)
196 self._map[f] = dirstatetuple(state, mode, size, mtime)
189 if state != b'n' or mtime == AMBIGUOUS_TIME:
197 if state != b'n' or mtime == AMBIGUOUS_TIME:
190 self.nonnormalset.add(f)
198 self.nonnormalset.add(f)
191 if size == FROM_P2:
199 if size == FROM_P2:
192 self.otherparentset.add(f)
200 self.otherparentset.add(f)
193
201
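The changeset's new `merged` flag slots into the decision table of addfile() above alongside `added`, `from_p2` and `possibly_dirty`. Here is a standalone sketch of just that table, with the sentinel constants copied from the top of this file; it is not the real dirstatemap.addfile, only the (state, mode, size, mtime) mapping each branch produces.

FROM_P2 = -2
NONNORMAL = -1
AMBIGUOUS_TIME = -1
RANGEMASK = 0x7FFFFFFF

def entry_for(state=b'n', mode=0, size=0, mtime=0,
              added=False, merged=False, from_p2=False, possibly_dirty=False):
    # mirrors the branch order of addfile() above
    if added:
        return (b'a', mode, NONNORMAL, AMBIGUOUS_TIME)
    if merged:
        return (b'm', mode, FROM_P2, AMBIGUOUS_TIME)
    if from_p2:
        return (state, mode, FROM_P2, AMBIGUOUS_TIME)
    if possibly_dirty:
        return (state, mode, NONNORMAL, AMBIGUOUS_TIME)
    # normal entries keep size/mtime, truncated to 31 bits like the original
    return (state, mode, size & RANGEMASK, mtime & RANGEMASK)

print(entry_for(merged=True))                    # (b'm', 0, -2, -1)
print(entry_for(added=True))                     # (b'a', 0, -1, -1)
print(entry_for(state=b'n', size=12, mtime=99))  # (b'n', 0, 12, 99)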
194 def removefile(self, f, in_merge=False):
202 def removefile(self, f, in_merge=False):
195 """
203 """
196 Mark a file as removed in the dirstate.
204 Mark a file as removed in the dirstate.
197
205
198 The `size` parameter is used to store sentinel values that indicate
206 The `size` parameter is used to store sentinel values that indicate
199 the file's previous state. In the future, we should refactor this
207 the file's previous state. In the future, we should refactor this
200 to be more explicit about what that state is.
208 to be more explicit about what that state is.
201 """
209 """
202 entry = self.get(f)
210 entry = self.get(f)
203 size = 0
211 size = 0
204 if in_merge:
212 if in_merge:
205 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
213 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
206 # during a merge. So I (marmoute) am not sure we need the
214 # during a merge. So I (marmoute) am not sure we need the
207 # conditional at all. Double checking this with an assert
215 # conditional at all. Double checking this with an assert
208 # would be nice.
216 # would be nice.
209 if entry is not None:
217 if entry is not None:
210 # backup the previous state
218 # backup the previous state
211 if entry.merged: # merge
219 if entry.merged: # merge
212 size = NONNORMAL
220 size = NONNORMAL
213 elif entry[0] == b'n' and entry.from_p2:
221 elif entry[0] == b'n' and entry.from_p2:
214 size = FROM_P2
222 size = FROM_P2
215 self.otherparentset.add(f)
223 self.otherparentset.add(f)
216 if size == 0:
224 if size == 0:
217 self.copymap.pop(f, None)
225 self.copymap.pop(f, None)
218
226
219 if entry is not None and entry[0] != b'r' and "_dirs" in self.__dict__:
227 if entry is not None and entry[0] != b'r' and "_dirs" in self.__dict__:
220 self._dirs.delpath(f)
228 self._dirs.delpath(f)
221 if entry is None and "_alldirs" in self.__dict__:
229 if entry is None and "_alldirs" in self.__dict__:
222 self._alldirs.addpath(f)
230 self._alldirs.addpath(f)
223 if "filefoldmap" in self.__dict__:
231 if "filefoldmap" in self.__dict__:
224 normed = util.normcase(f)
232 normed = util.normcase(f)
225 self.filefoldmap.pop(normed, None)
233 self.filefoldmap.pop(normed, None)
226 self._map[f] = dirstatetuple(b'r', 0, size, 0)
234 self._map[f] = dirstatetuple(b'r', 0, size, 0)
227 self.nonnormalset.add(f)
235 self.nonnormalset.add(f)
228
236
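A companion sketch for removefile() above: when called during a merge, the flavour of the old entry is backed up through the `size` sentinel stored with the new 'r' entry. Again this is not the real class, only the branch logic, with the sentinel constants repeated so the snippet stands alone.

FROM_P2 = -2
NONNORMAL = -1

def removed_size(prev_state, prev_from_p2, in_merge):
    """Size sentinel recorded with the 'r' entry.

    `prev_state`/`prev_from_p2` describe the entry being replaced; the return
    value is the `size` field of the new removed entry.
    """
    if not in_merge or prev_state is None:
        return 0
    if prev_state == b'm':                    # previously merged
        return NONNORMAL
    if prev_state == b'n' and prev_from_p2:   # came from the other parent
        return FROM_P2
    return 0

print(removed_size(b'm', False, True))   # -1: remembers the merge
print(removed_size(b'n', True, True))    # -2: remembers "from p2"
print(removed_size(b'n', False, False))  # 0: plain removal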
229 def dropfile(self, f, oldstate):
237 def dropfile(self, f, oldstate):
230 """
238 """
231 Remove a file from the dirstate. Returns True if the file was
239 Remove a file from the dirstate. Returns True if the file was
232 previously recorded.
240 previously recorded.
233 """
241 """
234 exists = self._map.pop(f, None) is not None
242 exists = self._map.pop(f, None) is not None
235 if exists:
243 if exists:
236 if oldstate != b"r" and "_dirs" in self.__dict__:
244 if oldstate != b"r" and "_dirs" in self.__dict__:
237 self._dirs.delpath(f)
245 self._dirs.delpath(f)
238 if "_alldirs" in self.__dict__:
246 if "_alldirs" in self.__dict__:
239 self._alldirs.delpath(f)
247 self._alldirs.delpath(f)
240 if "filefoldmap" in self.__dict__:
248 if "filefoldmap" in self.__dict__:
241 normed = util.normcase(f)
249 normed = util.normcase(f)
242 self.filefoldmap.pop(normed, None)
250 self.filefoldmap.pop(normed, None)
243 self.nonnormalset.discard(f)
251 self.nonnormalset.discard(f)
244 return exists
252 return exists
245
253
246 def clearambiguoustimes(self, files, now):
254 def clearambiguoustimes(self, files, now):
247 for f in files:
255 for f in files:
248 e = self.get(f)
256 e = self.get(f)
249 if e is not None and e[0] == b'n' and e[3] == now:
257 if e is not None and e[0] == b'n' and e[3] == now:
250 self._map[f] = dirstatetuple(e[0], e[1], e[2], AMBIGUOUS_TIME)
258 self._map[f] = dirstatetuple(e[0], e[1], e[2], AMBIGUOUS_TIME)
251 self.nonnormalset.add(f)
259 self.nonnormalset.add(f)
252
260
253 def nonnormalentries(self):
261 def nonnormalentries(self):
254 '''Compute the nonnormal dirstate entries from the dmap'''
262 '''Compute the nonnormal dirstate entries from the dmap'''
255 try:
263 try:
256 return parsers.nonnormalotherparententries(self._map)
264 return parsers.nonnormalotherparententries(self._map)
257 except AttributeError:
265 except AttributeError:
258 nonnorm = set()
266 nonnorm = set()
259 otherparent = set()
267 otherparent = set()
260 for fname, e in pycompat.iteritems(self._map):
268 for fname, e in pycompat.iteritems(self._map):
261 if e[0] != b'n' or e[3] == AMBIGUOUS_TIME:
269 if e[0] != b'n' or e[3] == AMBIGUOUS_TIME:
262 nonnorm.add(fname)
270 nonnorm.add(fname)
263 if e[0] == b'n' and e[2] == FROM_P2:
271 if e[0] == b'n' and e[2] == FROM_P2:
264 otherparent.add(fname)
272 otherparent.add(fname)
265 return nonnorm, otherparent
273 return nonnorm, otherparent
266
274
267 @propertycache
275 @propertycache
268 def filefoldmap(self):
276 def filefoldmap(self):
269 """Returns a dictionary mapping normalized case paths to their
277 """Returns a dictionary mapping normalized case paths to their
270 non-normalized versions.
278 non-normalized versions.
271 """
279 """
272 try:
280 try:
273 makefilefoldmap = parsers.make_file_foldmap
281 makefilefoldmap = parsers.make_file_foldmap
274 except AttributeError:
282 except AttributeError:
275 pass
283 pass
276 else:
284 else:
277 return makefilefoldmap(
285 return makefilefoldmap(
278 self._map, util.normcasespec, util.normcasefallback
286 self._map, util.normcasespec, util.normcasefallback
279 )
287 )
280
288
281 f = {}
289 f = {}
282 normcase = util.normcase
290 normcase = util.normcase
283 for name, s in pycompat.iteritems(self._map):
291 for name, s in pycompat.iteritems(self._map):
284 if s[0] != b'r':
292 if s[0] != b'r':
285 f[normcase(name)] = name
293 f[normcase(name)] = name
286 f[b'.'] = b'.' # prevents useless util.fspath() invocation
294 f[b'.'] = b'.' # prevents useless util.fspath() invocation
287 return f
295 return f
288
296
289 def hastrackeddir(self, d):
297 def hastrackeddir(self, d):
290 """
298 """
291 Returns True if the dirstate contains a tracked (not removed) file
299 Returns True if the dirstate contains a tracked (not removed) file
292 in this directory.
300 in this directory.
293 """
301 """
294 return d in self._dirs
302 return d in self._dirs
295
303
296 def hasdir(self, d):
304 def hasdir(self, d):
297 """
305 """
298 Returns True if the dirstate contains a file (tracked or removed)
306 Returns True if the dirstate contains a file (tracked or removed)
299 in this directory.
307 in this directory.
300 """
308 """
301 return d in self._alldirs
309 return d in self._alldirs
302
310
303 @propertycache
311 @propertycache
304 def _dirs(self):
312 def _dirs(self):
305 return pathutil.dirs(self._map, b'r')
313 return pathutil.dirs(self._map, b'r')
306
314
307 @propertycache
315 @propertycache
308 def _alldirs(self):
316 def _alldirs(self):
309 return pathutil.dirs(self._map)
317 return pathutil.dirs(self._map)
310
318
311 def _opendirstatefile(self):
319 def _opendirstatefile(self):
312 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
320 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
313 if self._pendingmode is not None and self._pendingmode != mode:
321 if self._pendingmode is not None and self._pendingmode != mode:
314 fp.close()
322 fp.close()
315 raise error.Abort(
323 raise error.Abort(
316 _(b'working directory state may be changed parallelly')
324 _(b'working directory state may be changed parallelly')
317 )
325 )
318 self._pendingmode = mode
326 self._pendingmode = mode
319 return fp
327 return fp
320
328
321 def parents(self):
329 def parents(self):
322 if not self._parents:
330 if not self._parents:
323 try:
331 try:
324 fp = self._opendirstatefile()
332 fp = self._opendirstatefile()
325 st = fp.read(2 * self._nodelen)
333 st = fp.read(2 * self._nodelen)
326 fp.close()
334 fp.close()
327 except IOError as err:
335 except IOError as err:
328 if err.errno != errno.ENOENT:
336 if err.errno != errno.ENOENT:
329 raise
337 raise
330 # File doesn't exist, so the current state is empty
338 # File doesn't exist, so the current state is empty
331 st = b''
339 st = b''
332
340
333 l = len(st)
341 l = len(st)
334 if l == self._nodelen * 2:
342 if l == self._nodelen * 2:
335 self._parents = (
343 self._parents = (
336 st[: self._nodelen],
344 st[: self._nodelen],
337 st[self._nodelen : 2 * self._nodelen],
345 st[self._nodelen : 2 * self._nodelen],
338 )
346 )
339 elif l == 0:
347 elif l == 0:
340 self._parents = (
348 self._parents = (
341 self._nodeconstants.nullid,
349 self._nodeconstants.nullid,
342 self._nodeconstants.nullid,
350 self._nodeconstants.nullid,
343 )
351 )
344 else:
352 else:
345 raise error.Abort(
353 raise error.Abort(
346 _(b'working directory state appears damaged!')
354 _(b'working directory state appears damaged!')
347 )
355 )
348
356
349 return self._parents
357 return self._parents
350
358
351 def setparents(self, p1, p2):
359 def setparents(self, p1, p2):
352 self._parents = (p1, p2)
360 self._parents = (p1, p2)
353 self._dirtyparents = True
361 self._dirtyparents = True
354
362
355 def read(self):
363 def read(self):
356 # ignore HG_PENDING because identity is used only for writing
364 # ignore HG_PENDING because identity is used only for writing
357 self.identity = util.filestat.frompath(
365 self.identity = util.filestat.frompath(
358 self._opener.join(self._filename)
366 self._opener.join(self._filename)
359 )
367 )
360
368
361 try:
369 try:
362 fp = self._opendirstatefile()
370 fp = self._opendirstatefile()
363 try:
371 try:
364 st = fp.read()
372 st = fp.read()
365 finally:
373 finally:
366 fp.close()
374 fp.close()
367 except IOError as err:
375 except IOError as err:
368 if err.errno != errno.ENOENT:
376 if err.errno != errno.ENOENT:
369 raise
377 raise
370 return
378 return
371 if not st:
379 if not st:
372 return
380 return
373
381
374 if util.safehasattr(parsers, b'dict_new_presized'):
382 if util.safehasattr(parsers, b'dict_new_presized'):
375 # Make an estimate of the number of files in the dirstate based on
383 # Make an estimate of the number of files in the dirstate based on
376 # its size. This trades wasting some memory for avoiding costly
384 # its size. This trades wasting some memory for avoiding costly
377 # resizes. Each entry has a prefix of 17 bytes followed by one or
385 # resizes. Each entry has a prefix of 17 bytes followed by one or
378 # two path names. Studies on various large-scale real-world repositories
386 # two path names. Studies on various large-scale real-world repositories
379 # found 54 bytes to be a reasonable upper limit for the average path name.
387 # found 54 bytes to be a reasonable upper limit for the average path name.
380 # Copy entries are ignored for the sake of this estimate.
388 # Copy entries are ignored for the sake of this estimate.
381 self._map = parsers.dict_new_presized(len(st) // 71)
389 self._map = parsers.dict_new_presized(len(st) // 71)
382
390
383 # Python's garbage collector triggers a GC each time a certain number
391 # Python's garbage collector triggers a GC each time a certain number
384 # of container objects (the number being defined by
392 # of container objects (the number being defined by
385 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
393 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
386 # for each file in the dirstate. The C version then immediately marks
394 # for each file in the dirstate. The C version then immediately marks
387 # them as not to be tracked by the collector. However, this has no
395 # them as not to be tracked by the collector. However, this has no
388 # effect on when GCs are triggered, only on what objects the GC looks
396 # effect on when GCs are triggered, only on what objects the GC looks
389 # into. This means that O(number of files) GCs are unavoidable.
397 # into. This means that O(number of files) GCs are unavoidable.
390 # Depending on when in the process's lifetime the dirstate is parsed,
398 # Depending on when in the process's lifetime the dirstate is parsed,
391 # this can get very expensive. As a workaround, disable GC while
399 # this can get very expensive. As a workaround, disable GC while
392 # parsing the dirstate.
400 # parsing the dirstate.
393 #
401 #
394 # (we cannot decorate the function directly since it is in a C module)
402 # (we cannot decorate the function directly since it is in a C module)
395 parse_dirstate = util.nogc(parsers.parse_dirstate)
403 parse_dirstate = util.nogc(parsers.parse_dirstate)
396 p = parse_dirstate(self._map, self.copymap, st)
404 p = parse_dirstate(self._map, self.copymap, st)
397 if not self._dirtyparents:
405 if not self._dirtyparents:
398 self.setparents(*p)
406 self.setparents(*p)
399
407
400 # Avoid excess attribute lookups by fast pathing certain checks
408 # Avoid excess attribute lookups by fast pathing certain checks
401 self.__contains__ = self._map.__contains__
409 self.__contains__ = self._map.__contains__
402 self.__getitem__ = self._map.__getitem__
410 self.__getitem__ = self._map.__getitem__
403 self.get = self._map.get
411 self.get = self._map.get
404
412
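The presizing comment in read() above amounts to a one-line estimate: 17 bytes of fixed header plus roughly 54 bytes of path per entry, about 71 bytes each, hence `len(st) // 71`. A trivial standalone check of that arithmetic with a hypothetical dirstate file size:

ENTRY_HEADER = 17
AVG_PATH = 54
per_entry = ENTRY_HEADER + AVG_PATH   # ~71 bytes per dirstate entry on average
dirstate_bytes = 5_000_000            # hypothetical 5 MB dirstate file
print(dirstate_bytes // per_entry)    # ~70k entries -> size passed to dict_new_presized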
405 def write(self, st, now):
413 def write(self, st, now):
406 st.write(
414 st.write(
407 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
415 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
408 )
416 )
409 st.close()
417 st.close()
410 self._dirtyparents = False
418 self._dirtyparents = False
411 self.nonnormalset, self.otherparentset = self.nonnormalentries()
419 self.nonnormalset, self.otherparentset = self.nonnormalentries()
412
420
413 @propertycache
421 @propertycache
414 def nonnormalset(self):
422 def nonnormalset(self):
415 nonnorm, otherparents = self.nonnormalentries()
423 nonnorm, otherparents = self.nonnormalentries()
416 self.otherparentset = otherparents
424 self.otherparentset = otherparents
417 return nonnorm
425 return nonnorm
418
426
419 @propertycache
427 @propertycache
420 def otherparentset(self):
428 def otherparentset(self):
421 nonnorm, otherparents = self.nonnormalentries()
429 nonnorm, otherparents = self.nonnormalentries()
422 self.nonnormalset = nonnorm
430 self.nonnormalset = nonnorm
423 return otherparents
431 return otherparents
424
432
425 def non_normal_or_other_parent_paths(self):
433 def non_normal_or_other_parent_paths(self):
426 return self.nonnormalset.union(self.otherparentset)
434 return self.nonnormalset.union(self.otherparentset)
427
435
428 @propertycache
436 @propertycache
429 def identity(self):
437 def identity(self):
430 self._map
438 self._map
431 return self.identity
439 return self.identity
432
440
433 @propertycache
441 @propertycache
434 def dirfoldmap(self):
442 def dirfoldmap(self):
435 f = {}
443 f = {}
436 normcase = util.normcase
444 normcase = util.normcase
437 for name in self._dirs:
445 for name in self._dirs:
438 f[normcase(name)] = name
446 f[normcase(name)] = name
439 return f
447 return f
440
448
441
449
442 if rustmod is not None:
450 if rustmod is not None:
443
451
444 class dirstatemap(object):
452 class dirstatemap(object):
445 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
453 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
446 self._use_dirstate_v2 = use_dirstate_v2
454 self._use_dirstate_v2 = use_dirstate_v2
447 self._nodeconstants = nodeconstants
455 self._nodeconstants = nodeconstants
448 self._ui = ui
456 self._ui = ui
449 self._opener = opener
457 self._opener = opener
450 self._root = root
458 self._root = root
451 self._filename = b'dirstate'
459 self._filename = b'dirstate'
452 self._nodelen = 20 # Also update Rust code when changing this!
460 self._nodelen = 20 # Also update Rust code when changing this!
453 self._parents = None
461 self._parents = None
454 self._dirtyparents = False
462 self._dirtyparents = False
455
463
456 # for consistent view between _pl() and _read() invocations
464 # for consistent view between _pl() and _read() invocations
457 self._pendingmode = None
465 self._pendingmode = None
458
466
459 self._use_dirstate_tree = self._ui.configbool(
467 self._use_dirstate_tree = self._ui.configbool(
460 b"experimental",
468 b"experimental",
461 b"dirstate-tree.in-memory",
469 b"dirstate-tree.in-memory",
462 False,
470 False,
463 )
471 )
464
472
465 def addfile(
473 def addfile(
466 self,
474 self,
467 f,
475 f,
468 state=None,
476 state=None,
469 mode=0,
477 mode=0,
470 size=None,
478 size=None,
471 mtime=None,
479 mtime=None,
472 added=False,
480 added=False,
481 merged=False,
473 from_p2=False,
482 from_p2=False,
474 possibly_dirty=False,
483 possibly_dirty=False,
475 ):
484 ):
476 return self._rustmap.addfile(
485 return self._rustmap.addfile(
477 f,
486 f,
478 state,
487 state,
479 mode,
488 mode,
480 size,
489 size,
481 mtime,
490 mtime,
482 added,
491 added,
492 merged,
483 from_p2,
493 from_p2,
484 possibly_dirty,
494 possibly_dirty,
485 )
495 )
486
496
487 def removefile(self, *args, **kwargs):
497 def removefile(self, *args, **kwargs):
488 return self._rustmap.removefile(*args, **kwargs)
498 return self._rustmap.removefile(*args, **kwargs)
489
499
490 def dropfile(self, *args, **kwargs):
500 def dropfile(self, *args, **kwargs):
491 return self._rustmap.dropfile(*args, **kwargs)
501 return self._rustmap.dropfile(*args, **kwargs)
492
502
493 def clearambiguoustimes(self, *args, **kwargs):
503 def clearambiguoustimes(self, *args, **kwargs):
494 return self._rustmap.clearambiguoustimes(*args, **kwargs)
504 return self._rustmap.clearambiguoustimes(*args, **kwargs)
495
505
496 def nonnormalentries(self):
506 def nonnormalentries(self):
497 return self._rustmap.nonnormalentries()
507 return self._rustmap.nonnormalentries()
498
508
499 def get(self, *args, **kwargs):
509 def get(self, *args, **kwargs):
500 return self._rustmap.get(*args, **kwargs)
510 return self._rustmap.get(*args, **kwargs)
501
511
502 @property
512 @property
503 def copymap(self):
513 def copymap(self):
504 return self._rustmap.copymap()
514 return self._rustmap.copymap()
505
515
506 def directories(self):
516 def directories(self):
507 return self._rustmap.directories()
517 return self._rustmap.directories()
508
518
509 def preload(self):
519 def preload(self):
510 self._rustmap
520 self._rustmap
511
521
512 def clear(self):
522 def clear(self):
513 self._rustmap.clear()
523 self._rustmap.clear()
514 self.setparents(
524 self.setparents(
515 self._nodeconstants.nullid, self._nodeconstants.nullid
525 self._nodeconstants.nullid, self._nodeconstants.nullid
516 )
526 )
517 util.clearcachedproperty(self, b"_dirs")
527 util.clearcachedproperty(self, b"_dirs")
518 util.clearcachedproperty(self, b"_alldirs")
528 util.clearcachedproperty(self, b"_alldirs")
519 util.clearcachedproperty(self, b"dirfoldmap")
529 util.clearcachedproperty(self, b"dirfoldmap")
520
530
521 def items(self):
531 def items(self):
522 return self._rustmap.items()
532 return self._rustmap.items()
523
533
524 def keys(self):
534 def keys(self):
525 return iter(self._rustmap)
535 return iter(self._rustmap)
526
536
527 def __contains__(self, key):
537 def __contains__(self, key):
528 return key in self._rustmap
538 return key in self._rustmap
529
539
530 def __getitem__(self, item):
540 def __getitem__(self, item):
531 return self._rustmap[item]
541 return self._rustmap[item]
532
542
533 def __len__(self):
543 def __len__(self):
534 return len(self._rustmap)
544 return len(self._rustmap)
535
545
536 def __iter__(self):
546 def __iter__(self):
537 return iter(self._rustmap)
547 return iter(self._rustmap)
538
548
539 # forward for python2,3 compat
549 # forward for python2,3 compat
540 iteritems = items
550 iteritems = items
541
551
542 def _opendirstatefile(self):
552 def _opendirstatefile(self):
543 fp, mode = txnutil.trypending(
553 fp, mode = txnutil.trypending(
544 self._root, self._opener, self._filename
554 self._root, self._opener, self._filename
545 )
555 )
546 if self._pendingmode is not None and self._pendingmode != mode:
556 if self._pendingmode is not None and self._pendingmode != mode:
547 fp.close()
557 fp.close()
548 raise error.Abort(
558 raise error.Abort(
549 _(b'working directory state may be changed in parallel')
559 _(b'working directory state may be changed in parallel')
550 )
560 )
551 self._pendingmode = mode
561 self._pendingmode = mode
552 return fp
562 return fp
553
563
554 def setparents(self, p1, p2):
564 def setparents(self, p1, p2):
555 self._parents = (p1, p2)
565 self._parents = (p1, p2)
556 self._dirtyparents = True
566 self._dirtyparents = True
557
567
558 def parents(self):
568 def parents(self):
559 if not self._parents:
569 if not self._parents:
560 if self._use_dirstate_v2:
570 if self._use_dirstate_v2:
561 offset = len(rustmod.V2_FORMAT_MARKER)
571 offset = len(rustmod.V2_FORMAT_MARKER)
562 else:
572 else:
563 offset = 0
573 offset = 0
564 read_len = offset + self._nodelen * 2
574 read_len = offset + self._nodelen * 2
565 try:
575 try:
566 fp = self._opendirstatefile()
576 fp = self._opendirstatefile()
567 st = fp.read(read_len)
577 st = fp.read(read_len)
568 fp.close()
578 fp.close()
569 except IOError as err:
579 except IOError as err:
570 if err.errno != errno.ENOENT:
580 if err.errno != errno.ENOENT:
571 raise
581 raise
572 # File doesn't exist, so the current state is empty
582 # File doesn't exist, so the current state is empty
573 st = b''
583 st = b''
574
584
575 l = len(st)
585 l = len(st)
576 if l == read_len:
586 if l == read_len:
577 st = st[offset:]
587 st = st[offset:]
578 self._parents = (
588 self._parents = (
579 st[: self._nodelen],
589 st[: self._nodelen],
580 st[self._nodelen : 2 * self._nodelen],
590 st[self._nodelen : 2 * self._nodelen],
581 )
591 )
582 elif l == 0:
592 elif l == 0:
583 self._parents = (
593 self._parents = (
584 self._nodeconstants.nullid,
594 self._nodeconstants.nullid,
585 self._nodeconstants.nullid,
595 self._nodeconstants.nullid,
586 )
596 )
587 else:
597 else:
588 raise error.Abort(
598 raise error.Abort(
589 _(b'working directory state appears damaged!')
599 _(b'working directory state appears damaged!')
590 )
600 )
591
601
592 return self._parents
602 return self._parents
593
603
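For readers following the header parsing in `parents()` above: the method reads an optional dirstate-v2 format marker, then two raw parent nodes of `self._nodelen` (20) bytes each, and treats an empty file as "both parents are null". The following is a minimal, self-contained Rust sketch of that layout, not Mercurial's actual parser; the marker length is taken as a parameter because the real `V2_FORMAT_MARKER` bytes are not reproduced here.

```rust
// Hypothetical sketch of the dirstate header layout used by `parents()`:
// [optional v2 marker][p1: 20 bytes][p2: 20 bytes][entries...]
const NODE_LEN: usize = 20; // matches `self._nodelen` above

fn parse_parents(
    data: &[u8],
    marker_len: usize, // length of the v2 marker, 0 for dirstate-v1
) -> Result<([u8; NODE_LEN], [u8; NODE_LEN]), &'static str> {
    if data.is_empty() {
        // No dirstate file yet: both parents are the null id.
        return Ok(([0; NODE_LEN], [0; NODE_LEN]));
    }
    let read_len = marker_len + 2 * NODE_LEN;
    if data.len() < read_len {
        // Mirrors the "working directory state appears damaged" abort.
        return Err("working directory state appears damaged");
    }
    let mut p1 = [0u8; NODE_LEN];
    let mut p2 = [0u8; NODE_LEN];
    p1.copy_from_slice(&data[marker_len..marker_len + NODE_LEN]);
    p2.copy_from_slice(&data[marker_len + NODE_LEN..read_len]);
    Ok((p1, p2))
}

fn main() {
    let mut blob = vec![0u8; 100]; // pretend dirstate-v1 contents (no marker)
    blob[..NODE_LEN].fill(1); // fake p1
    blob[NODE_LEN..2 * NODE_LEN].fill(2); // fake p2
    let (p1, p2) = parse_parents(&blob, 0).unwrap();
    assert_eq!(p1, [1u8; NODE_LEN]);
    assert_eq!(p2, [2u8; NODE_LEN]);
}
```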
594 @propertycache
604 @propertycache
595 def _rustmap(self):
605 def _rustmap(self):
596 """
606 """
597 Fills the DirstateMap when called.
607 Fills the DirstateMap when called.
598 """
608 """
599 # ignore HG_PENDING because identity is used only for writing
609 # ignore HG_PENDING because identity is used only for writing
600 self.identity = util.filestat.frompath(
610 self.identity = util.filestat.frompath(
601 self._opener.join(self._filename)
611 self._opener.join(self._filename)
602 )
612 )
603
613
604 try:
614 try:
605 fp = self._opendirstatefile()
615 fp = self._opendirstatefile()
606 try:
616 try:
607 st = fp.read()
617 st = fp.read()
608 finally:
618 finally:
609 fp.close()
619 fp.close()
610 except IOError as err:
620 except IOError as err:
611 if err.errno != errno.ENOENT:
621 if err.errno != errno.ENOENT:
612 raise
622 raise
613 st = b''
623 st = b''
614
624
615 self._rustmap, parents = rustmod.DirstateMap.new(
625 self._rustmap, parents = rustmod.DirstateMap.new(
616 self._use_dirstate_tree, self._use_dirstate_v2, st
626 self._use_dirstate_tree, self._use_dirstate_v2, st
617 )
627 )
618
628
619 if parents and not self._dirtyparents:
629 if parents and not self._dirtyparents:
620 self.setparents(*parents)
630 self.setparents(*parents)
621
631
622 self.__contains__ = self._rustmap.__contains__
632 self.__contains__ = self._rustmap.__contains__
623 self.__getitem__ = self._rustmap.__getitem__
633 self.__getitem__ = self._rustmap.__getitem__
624 self.get = self._rustmap.get
634 self.get = self._rustmap.get
625 return self._rustmap
635 return self._rustmap
626
636
627 def write(self, st, now):
637 def write(self, st, now):
628 parents = self.parents()
638 parents = self.parents()
629 packed = self._rustmap.write(
639 packed = self._rustmap.write(
630 self._use_dirstate_v2, parents[0], parents[1], now
640 self._use_dirstate_v2, parents[0], parents[1], now
631 )
641 )
632 st.write(packed)
642 st.write(packed)
633 st.close()
643 st.close()
634 self._dirtyparents = False
644 self._dirtyparents = False
635
645
636 @propertycache
646 @propertycache
637 def filefoldmap(self):
647 def filefoldmap(self):
638 """Returns a dictionary mapping normalized case paths to their
648 """Returns a dictionary mapping normalized case paths to their
639 non-normalized versions.
649 non-normalized versions.
640 """
650 """
641 return self._rustmap.filefoldmapasdict()
651 return self._rustmap.filefoldmapasdict()
642
652
643 def hastrackeddir(self, d):
653 def hastrackeddir(self, d):
644 return self._rustmap.hastrackeddir(d)
654 return self._rustmap.hastrackeddir(d)
645
655
646 def hasdir(self, d):
656 def hasdir(self, d):
647 return self._rustmap.hasdir(d)
657 return self._rustmap.hasdir(d)
648
658
649 @propertycache
659 @propertycache
650 def identity(self):
660 def identity(self):
651 self._rustmap
661 self._rustmap
652 return self.identity
662 return self.identity
653
663
654 @property
664 @property
655 def nonnormalset(self):
665 def nonnormalset(self):
656 nonnorm = self._rustmap.non_normal_entries()
666 nonnorm = self._rustmap.non_normal_entries()
657 return nonnorm
667 return nonnorm
658
668
659 @propertycache
669 @propertycache
660 def otherparentset(self):
670 def otherparentset(self):
661 otherparents = self._rustmap.other_parent_entries()
671 otherparents = self._rustmap.other_parent_entries()
662 return otherparents
672 return otherparents
663
673
664 def non_normal_or_other_parent_paths(self):
674 def non_normal_or_other_parent_paths(self):
665 return self._rustmap.non_normal_or_other_parent_paths()
675 return self._rustmap.non_normal_or_other_parent_paths()
666
676
667 @propertycache
677 @propertycache
668 def dirfoldmap(self):
678 def dirfoldmap(self):
669 f = {}
679 f = {}
670 normcase = util.normcase
680 normcase = util.normcase
671 for name, _pseudo_entry in self.directories():
681 for name, _pseudo_entry in self.directories():
672 f[normcase(name)] = name
682 f[normcase(name)] = name
673 return f
683 return f
@@ -1,466 +1,475 b''
1 // dirstate_map.rs
1 // dirstate_map.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::dirstate::parsers::Timestamp;
8 use crate::dirstate::parsers::Timestamp;
9 use crate::{
9 use crate::{
10 dirstate::EntryState,
10 dirstate::EntryState,
11 dirstate::MTIME_UNSET,
11 dirstate::MTIME_UNSET,
12 dirstate::SIZE_FROM_OTHER_PARENT,
12 dirstate::SIZE_FROM_OTHER_PARENT,
13 dirstate::SIZE_NON_NORMAL,
13 dirstate::SIZE_NON_NORMAL,
14 dirstate::V1_RANGEMASK,
14 dirstate::V1_RANGEMASK,
15 pack_dirstate, parse_dirstate,
15 pack_dirstate, parse_dirstate,
16 utils::hg_path::{HgPath, HgPathBuf},
16 utils::hg_path::{HgPath, HgPathBuf},
17 CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
17 CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
18 StateMap,
18 StateMap,
19 };
19 };
20 use micro_timer::timed;
20 use micro_timer::timed;
21 use std::collections::HashSet;
21 use std::collections::HashSet;
22 use std::iter::FromIterator;
22 use std::iter::FromIterator;
23 use std::ops::Deref;
23 use std::ops::Deref;
24
24
25 #[derive(Default)]
25 #[derive(Default)]
26 pub struct DirstateMap {
26 pub struct DirstateMap {
27 state_map: StateMap,
27 state_map: StateMap,
28 pub copy_map: CopyMap,
28 pub copy_map: CopyMap,
29 pub dirs: Option<DirsMultiset>,
29 pub dirs: Option<DirsMultiset>,
30 pub all_dirs: Option<DirsMultiset>,
30 pub all_dirs: Option<DirsMultiset>,
31 non_normal_set: Option<HashSet<HgPathBuf>>,
31 non_normal_set: Option<HashSet<HgPathBuf>>,
32 other_parent_set: Option<HashSet<HgPathBuf>>,
32 other_parent_set: Option<HashSet<HgPathBuf>>,
33 }
33 }
34
34
35 /// Should only really be used in python interface code, for clarity
35 /// Should only really be used in python interface code, for clarity
36 impl Deref for DirstateMap {
36 impl Deref for DirstateMap {
37 type Target = StateMap;
37 type Target = StateMap;
38
38
39 fn deref(&self) -> &Self::Target {
39 fn deref(&self) -> &Self::Target {
40 &self.state_map
40 &self.state_map
41 }
41 }
42 }
42 }
43
43
44 impl FromIterator<(HgPathBuf, DirstateEntry)> for DirstateMap {
44 impl FromIterator<(HgPathBuf, DirstateEntry)> for DirstateMap {
45 fn from_iter<I: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
45 fn from_iter<I: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
46 iter: I,
46 iter: I,
47 ) -> Self {
47 ) -> Self {
48 Self {
48 Self {
49 state_map: iter.into_iter().collect(),
49 state_map: iter.into_iter().collect(),
50 ..Self::default()
50 ..Self::default()
51 }
51 }
52 }
52 }
53 }
53 }
54
54
55 impl DirstateMap {
55 impl DirstateMap {
56 pub fn new() -> Self {
56 pub fn new() -> Self {
57 Self::default()
57 Self::default()
58 }
58 }
59
59
60 pub fn clear(&mut self) {
60 pub fn clear(&mut self) {
61 self.state_map = StateMap::default();
61 self.state_map = StateMap::default();
62 self.copy_map.clear();
62 self.copy_map.clear();
63 self.non_normal_set = None;
63 self.non_normal_set = None;
64 self.other_parent_set = None;
64 self.other_parent_set = None;
65 }
65 }
66
66
67 /// Add a tracked file to the dirstate
67 /// Add a tracked file to the dirstate
68 pub fn add_file(
68 pub fn add_file(
69 &mut self,
69 &mut self,
70 filename: &HgPath,
70 filename: &HgPath,
71 entry: DirstateEntry,
71 entry: DirstateEntry,
72 // XXX once the dust settles this should probably become an enum
72 // XXX once the dust settles this should probably become an enum
73 added: bool,
73 added: bool,
74 merged: bool,
74 from_p2: bool,
75 from_p2: bool,
75 possibly_dirty: bool,
76 possibly_dirty: bool,
76 ) -> Result<(), DirstateError> {
77 ) -> Result<(), DirstateError> {
77 let mut entry = entry;
78 let mut entry = entry;
78 if added {
79 if added {
80 assert!(!merged);
79 assert!(!possibly_dirty);
81 assert!(!possibly_dirty);
80 assert!(!from_p2);
82 assert!(!from_p2);
81 entry.state = EntryState::Added;
83 entry.state = EntryState::Added;
82 entry.size = SIZE_NON_NORMAL;
84 entry.size = SIZE_NON_NORMAL;
83 entry.mtime = MTIME_UNSET;
85 entry.mtime = MTIME_UNSET;
86 } else if merged {
87 assert!(!possibly_dirty);
88 assert!(!from_p2);
89 entry.state = EntryState::Merged;
90 entry.size = SIZE_FROM_OTHER_PARENT;
91 entry.mtime = MTIME_UNSET;
84 } else if from_p2 {
92 } else if from_p2 {
85 assert!(!possibly_dirty);
93 assert!(!possibly_dirty);
86 entry.size = SIZE_FROM_OTHER_PARENT;
94 entry.size = SIZE_FROM_OTHER_PARENT;
87 entry.mtime = MTIME_UNSET;
95 entry.mtime = MTIME_UNSET;
88 } else if possibly_dirty {
96 } else if possibly_dirty {
89 entry.size = SIZE_NON_NORMAL;
97 entry.size = SIZE_NON_NORMAL;
90 entry.mtime = MTIME_UNSET;
98 entry.mtime = MTIME_UNSET;
91 } else {
99 } else {
92 entry.size = entry.size & V1_RANGEMASK;
100 entry.size = entry.size & V1_RANGEMASK;
93 entry.mtime = entry.mtime & V1_RANGEMASK;
101 entry.mtime = entry.mtime & V1_RANGEMASK;
94 }
102 }
95 let old_state = match self.get(filename) {
103 let old_state = match self.get(filename) {
96 Some(e) => e.state,
104 Some(e) => e.state,
97 None => EntryState::Unknown,
105 None => EntryState::Unknown,
98 };
106 };
99 if old_state == EntryState::Unknown || old_state == EntryState::Removed
107 if old_state == EntryState::Unknown || old_state == EntryState::Removed
100 {
108 {
101 if let Some(ref mut dirs) = self.dirs {
109 if let Some(ref mut dirs) = self.dirs {
102 dirs.add_path(filename)?;
110 dirs.add_path(filename)?;
103 }
111 }
104 }
112 }
105 if old_state == EntryState::Unknown {
113 if old_state == EntryState::Unknown {
106 if let Some(ref mut all_dirs) = self.all_dirs {
114 if let Some(ref mut all_dirs) = self.all_dirs {
107 all_dirs.add_path(filename)?;
115 all_dirs.add_path(filename)?;
108 }
116 }
109 }
117 }
110 self.state_map.insert(filename.to_owned(), entry.to_owned());
118 self.state_map.insert(filename.to_owned(), entry.to_owned());
111
119
112 if entry.is_non_normal() {
120 if entry.is_non_normal() {
113 self.get_non_normal_other_parent_entries()
121 self.get_non_normal_other_parent_entries()
114 .0
122 .0
115 .insert(filename.to_owned());
123 .insert(filename.to_owned());
116 }
124 }
117
125
118 if entry.is_from_other_parent() {
126 if entry.is_from_other_parent() {
119 self.get_non_normal_other_parent_entries()
127 self.get_non_normal_other_parent_entries()
120 .1
128 .1
121 .insert(filename.to_owned());
129 .insert(filename.to_owned());
122 }
130 }
123 Ok(())
131 Ok(())
124 }
132 }
125
133
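A minimal, self-contained sketch of the flag precedence that `add_file` applies above: `added` wins, then the newly introduced `merged`, then `from_p2`, then `possibly_dirty`, and otherwise size and mtime are masked into the v1 range. The `State`/`Entry` types and the sentinel constants below are local stand-ins (assumed values matching how this diff and its tests use −1 and −2), not the real `hg-core` types.

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum State {
    Normal,
    Added,
    Merged,
}

#[derive(Debug, Clone, Copy)]
struct Entry {
    state: State,
    size: i32,
    mtime: i32,
}

// Local stand-ins for the sentinels used in the diff above (assumed values).
const SIZE_NON_NORMAL: i32 = -1;
const SIZE_FROM_OTHER_PARENT: i32 = -2;
const MTIME_UNSET: i32 = -1;
const V1_RANGEMASK: i32 = 0x7FFF_FFFF;

// Mirror of the if/else-if chain in `add_file`, kept as a pure function.
fn apply_flags(
    mut entry: Entry,
    added: bool,
    merged: bool,
    from_p2: bool,
    possibly_dirty: bool,
) -> Entry {
    if added {
        entry.state = State::Added;
        entry.size = SIZE_NON_NORMAL;
        entry.mtime = MTIME_UNSET;
    } else if merged {
        // The branch this changeset introduces.
        entry.state = State::Merged;
        entry.size = SIZE_FROM_OTHER_PARENT;
        entry.mtime = MTIME_UNSET;
    } else if from_p2 {
        entry.size = SIZE_FROM_OTHER_PARENT;
        entry.mtime = MTIME_UNSET;
    } else if possibly_dirty {
        entry.size = SIZE_NON_NORMAL;
        entry.mtime = MTIME_UNSET;
    } else {
        entry.size &= V1_RANGEMASK;
        entry.mtime &= V1_RANGEMASK;
    }
    entry
}

fn main() {
    let base = Entry { state: State::Normal, size: 42, mtime: 1337 };
    let merged = apply_flags(base, false, true, false, false);
    assert_eq!(merged.state, State::Merged);
    assert_eq!(merged.size, SIZE_FROM_OTHER_PARENT);
    assert_eq!(merged.mtime, MTIME_UNSET);
}
```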
126 /// Mark a file as removed in the dirstate.
134 /// Mark a file as removed in the dirstate.
127 ///
135 ///
128 /// The `size` parameter is used to store sentinel values that indicate
136 /// The `size` parameter is used to store sentinel values that indicate
129 /// the file's previous state. In the future, we should refactor this
137 /// the file's previous state. In the future, we should refactor this
130 /// to be more explicit about what that state is.
138 /// to be more explicit about what that state is.
131 pub fn remove_file(
139 pub fn remove_file(
132 &mut self,
140 &mut self,
133 filename: &HgPath,
141 filename: &HgPath,
134 in_merge: bool,
142 in_merge: bool,
135 ) -> Result<(), DirstateError> {
143 ) -> Result<(), DirstateError> {
136 let old_entry_opt = self.get(filename);
144 let old_entry_opt = self.get(filename);
137 let old_state = match old_entry_opt {
145 let old_state = match old_entry_opt {
138 Some(e) => e.state,
146 Some(e) => e.state,
139 None => EntryState::Unknown,
147 None => EntryState::Unknown,
140 };
148 };
141 let mut size = 0;
149 let mut size = 0;
142 if in_merge {
150 if in_merge {
143 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
151 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
144 // during a merge. So I (marmoute) am not sure we need the
152 // during a merge. So I (marmoute) am not sure we need the
145 // conditional at all. Double-checking this with an assert
153 // conditional at all. Double-checking this with an assert
146 // would be nice.
154 // would be nice.
147 if let Some(old_entry) = old_entry_opt {
155 if let Some(old_entry) = old_entry_opt {
148 // backup the previous state
156 // backup the previous state
149 if old_entry.state == EntryState::Merged {
157 if old_entry.state == EntryState::Merged {
150 size = SIZE_NON_NORMAL;
158 size = SIZE_NON_NORMAL;
151 } else if old_entry.state == EntryState::Normal
159 } else if old_entry.state == EntryState::Normal
152 && old_entry.size == SIZE_FROM_OTHER_PARENT
160 && old_entry.size == SIZE_FROM_OTHER_PARENT
153 {
161 {
154 // other parent
162 // other parent
155 size = SIZE_FROM_OTHER_PARENT;
163 size = SIZE_FROM_OTHER_PARENT;
156 self.get_non_normal_other_parent_entries()
164 self.get_non_normal_other_parent_entries()
157 .1
165 .1
158 .insert(filename.to_owned());
166 .insert(filename.to_owned());
159 }
167 }
160 }
168 }
161 }
169 }
162 if old_state != EntryState::Unknown && old_state != EntryState::Removed
170 if old_state != EntryState::Unknown && old_state != EntryState::Removed
163 {
171 {
164 if let Some(ref mut dirs) = self.dirs {
172 if let Some(ref mut dirs) = self.dirs {
165 dirs.delete_path(filename)?;
173 dirs.delete_path(filename)?;
166 }
174 }
167 }
175 }
168 if old_state == EntryState::Unknown {
176 if old_state == EntryState::Unknown {
169 if let Some(ref mut all_dirs) = self.all_dirs {
177 if let Some(ref mut all_dirs) = self.all_dirs {
170 all_dirs.add_path(filename)?;
178 all_dirs.add_path(filename)?;
171 }
179 }
172 }
180 }
173 if size == 0 {
181 if size == 0 {
174 self.copy_map.remove(filename);
182 self.copy_map.remove(filename);
175 }
183 }
176
184
177 self.state_map.insert(
185 self.state_map.insert(
178 filename.to_owned(),
186 filename.to_owned(),
179 DirstateEntry {
187 DirstateEntry {
180 state: EntryState::Removed,
188 state: EntryState::Removed,
181 mode: 0,
189 mode: 0,
182 size,
190 size,
183 mtime: 0,
191 mtime: 0,
184 },
192 },
185 );
193 );
186 self.get_non_normal_other_parent_entries()
194 self.get_non_normal_other_parent_entries()
187 .0
195 .0
188 .insert(filename.to_owned());
196 .insert(filename.to_owned());
189 Ok(())
197 Ok(())
190 }
198 }
191
199
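To make the sentinel bookkeeping in `remove_file` above easier to follow, here is a hedged sketch of just the `size` selection: during a merge, a previously `Merged` entry is recorded with the non-normal sentinel, and a `Normal` entry whose size already carried the from-other-parent sentinel keeps it; everything else stores 0 (which is also the case that clears the copy map). The enum and constants are illustrative stand-ins, not the real types.

```rust
enum State {
    Normal,
    Merged,
}

// Assumed sentinel values, chosen to match the surrounding diff.
const SIZE_NON_NORMAL: i32 = -1;
const SIZE_FROM_OTHER_PARENT: i32 = -2;

/// Pick the `size` sentinel stored on the Removed entry, as `remove_file` does.
fn removed_size(old: Option<(State, i32)>, in_merge: bool) -> i32 {
    if !in_merge {
        return 0;
    }
    match old {
        Some((State::Merged, _)) => SIZE_NON_NORMAL,
        Some((State::Normal, size)) if size == SIZE_FROM_OTHER_PARENT => {
            SIZE_FROM_OTHER_PARENT
        }
        _ => 0,
    }
}

fn main() {
    // A previously "merged" file keeps a NON_NORMAL marker in its size...
    assert_eq!(removed_size(Some((State::Merged, 12)), true), SIZE_NON_NORMAL);
    // ...a file taken from the other parent keeps the FROM_OTHER_PARENT marker...
    assert_eq!(
        removed_size(Some((State::Normal, SIZE_FROM_OTHER_PARENT)), true),
        SIZE_FROM_OTHER_PARENT
    );
    // ...and outside of a merge the stored size is simply 0.
    assert_eq!(removed_size(Some((State::Merged, 12)), false), 0);
}
```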
192 /// Remove a file from the dirstate.
200 /// Remove a file from the dirstate.
193 /// Returns `true` if the file was previously recorded.
201 /// Returns `true` if the file was previously recorded.
194 pub fn drop_file(
202 pub fn drop_file(
195 &mut self,
203 &mut self,
196 filename: &HgPath,
204 filename: &HgPath,
197 old_state: EntryState,
205 old_state: EntryState,
198 ) -> Result<bool, DirstateError> {
206 ) -> Result<bool, DirstateError> {
199 let exists = self.state_map.remove(filename).is_some();
207 let exists = self.state_map.remove(filename).is_some();
200
208
201 if exists {
209 if exists {
202 if old_state != EntryState::Removed {
210 if old_state != EntryState::Removed {
203 if let Some(ref mut dirs) = self.dirs {
211 if let Some(ref mut dirs) = self.dirs {
204 dirs.delete_path(filename)?;
212 dirs.delete_path(filename)?;
205 }
213 }
206 }
214 }
207 if let Some(ref mut all_dirs) = self.all_dirs {
215 if let Some(ref mut all_dirs) = self.all_dirs {
208 all_dirs.delete_path(filename)?;
216 all_dirs.delete_path(filename)?;
209 }
217 }
210 }
218 }
211 self.get_non_normal_other_parent_entries()
219 self.get_non_normal_other_parent_entries()
212 .0
220 .0
213 .remove(filename);
221 .remove(filename);
214
222
215 Ok(exists)
223 Ok(exists)
216 }
224 }
217
225
218 pub fn clear_ambiguous_times(
226 pub fn clear_ambiguous_times(
219 &mut self,
227 &mut self,
220 filenames: Vec<HgPathBuf>,
228 filenames: Vec<HgPathBuf>,
221 now: i32,
229 now: i32,
222 ) {
230 ) {
223 for filename in filenames {
231 for filename in filenames {
224 if let Some(entry) = self.state_map.get_mut(&filename) {
232 if let Some(entry) = self.state_map.get_mut(&filename) {
225 if entry.clear_ambiguous_mtime(now) {
233 if entry.clear_ambiguous_mtime(now) {
226 self.get_non_normal_other_parent_entries()
234 self.get_non_normal_other_parent_entries()
227 .0
235 .0
228 .insert(filename.to_owned());
236 .insert(filename.to_owned());
229 }
237 }
230 }
238 }
231 }
239 }
232 }
240 }
233
241
234 pub fn non_normal_entries_remove(&mut self, key: impl AsRef<HgPath>) {
242 pub fn non_normal_entries_remove(&mut self, key: impl AsRef<HgPath>) {
235 self.get_non_normal_other_parent_entries()
243 self.get_non_normal_other_parent_entries()
236 .0
244 .0
237 .remove(key.as_ref());
245 .remove(key.as_ref());
238 }
246 }
239
247
240 pub fn non_normal_entries_union(
248 pub fn non_normal_entries_union(
241 &mut self,
249 &mut self,
242 other: HashSet<HgPathBuf>,
250 other: HashSet<HgPathBuf>,
243 ) -> Vec<HgPathBuf> {
251 ) -> Vec<HgPathBuf> {
244 self.get_non_normal_other_parent_entries()
252 self.get_non_normal_other_parent_entries()
245 .0
253 .0
246 .union(&other)
254 .union(&other)
247 .map(ToOwned::to_owned)
255 .map(ToOwned::to_owned)
248 .collect()
256 .collect()
249 }
257 }
250
258
251 pub fn get_non_normal_other_parent_entries(
259 pub fn get_non_normal_other_parent_entries(
252 &mut self,
260 &mut self,
253 ) -> (&mut HashSet<HgPathBuf>, &mut HashSet<HgPathBuf>) {
261 ) -> (&mut HashSet<HgPathBuf>, &mut HashSet<HgPathBuf>) {
254 self.set_non_normal_other_parent_entries(false);
262 self.set_non_normal_other_parent_entries(false);
255 (
263 (
256 self.non_normal_set.as_mut().unwrap(),
264 self.non_normal_set.as_mut().unwrap(),
257 self.other_parent_set.as_mut().unwrap(),
265 self.other_parent_set.as_mut().unwrap(),
258 )
266 )
259 }
267 }
260
268
261 /// Useful to get immutable references to those sets in contexts where
269 /// Useful to get immutable references to those sets in contexts where
262 /// you only have an immutable reference to the `DirstateMap`, like when
270 /// you only have an immutable reference to the `DirstateMap`, like when
263 /// sharing references with Python.
271 /// sharing references with Python.
264 ///
272 ///
265 /// TODO, get rid of this along with the other "setter/getter" stuff when
273 /// TODO, get rid of this along with the other "setter/getter" stuff when
266 /// a nice typestate plan is defined.
274 /// a nice typestate plan is defined.
267 ///
275 ///
268 /// # Panics
276 /// # Panics
269 ///
277 ///
270 /// Will panic if either set is `None`.
278 /// Will panic if either set is `None`.
271 pub fn get_non_normal_other_parent_entries_panic(
279 pub fn get_non_normal_other_parent_entries_panic(
272 &self,
280 &self,
273 ) -> (&HashSet<HgPathBuf>, &HashSet<HgPathBuf>) {
281 ) -> (&HashSet<HgPathBuf>, &HashSet<HgPathBuf>) {
274 (
282 (
275 self.non_normal_set.as_ref().unwrap(),
283 self.non_normal_set.as_ref().unwrap(),
276 self.other_parent_set.as_ref().unwrap(),
284 self.other_parent_set.as_ref().unwrap(),
277 )
285 )
278 }
286 }
279
287
280 pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
288 pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
281 if !force
289 if !force
282 && self.non_normal_set.is_some()
290 && self.non_normal_set.is_some()
283 && self.other_parent_set.is_some()
291 && self.other_parent_set.is_some()
284 {
292 {
285 return;
293 return;
286 }
294 }
287 let mut non_normal = HashSet::new();
295 let mut non_normal = HashSet::new();
288 let mut other_parent = HashSet::new();
296 let mut other_parent = HashSet::new();
289
297
290 for (filename, entry) in self.state_map.iter() {
298 for (filename, entry) in self.state_map.iter() {
291 if entry.is_non_normal() {
299 if entry.is_non_normal() {
292 non_normal.insert(filename.to_owned());
300 non_normal.insert(filename.to_owned());
293 }
301 }
294 if entry.is_from_other_parent() {
302 if entry.is_from_other_parent() {
295 other_parent.insert(filename.to_owned());
303 other_parent.insert(filename.to_owned());
296 }
304 }
297 }
305 }
298 self.non_normal_set = Some(non_normal);
306 self.non_normal_set = Some(non_normal);
299 self.other_parent_set = Some(other_parent);
307 self.other_parent_set = Some(other_parent);
300 }
308 }
301
309
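The two `Option<HashSet<_>>` fields filled in above emulate lazily computed Python properties: they are rebuilt from `state_map` on demand and can be forced to refresh. Below is a small stand-alone sketch of that caching pattern, with a made-up `Cache` type and a placeholder predicate standing in for `is_non_normal`; it is not the real `DirstateMap` API.

```rust
use std::collections::{HashMap, HashSet};

struct Cache {
    entries: HashMap<String, i32>,
    non_normal: Option<HashSet<String>>, // lazily computed, like above
}

impl Cache {
    /// (Re)build the cached set if missing or if `force` is true, then hand
    /// out a mutable reference, mirroring the setter/getter pair above.
    fn get_non_normal(&mut self, force: bool) -> &mut HashSet<String> {
        if force || self.non_normal.is_none() {
            let set = self
                .entries
                .iter()
                .filter(|&(_, &v)| v < 0) // placeholder "non-normal" predicate
                .map(|(k, _)| k.clone())
                .collect();
            self.non_normal = Some(set);
        }
        self.non_normal.as_mut().unwrap()
    }
}

fn main() {
    let mut cache = Cache {
        entries: HashMap::from([("a".to_string(), -1), ("b".to_string(), 7)]),
        non_normal: None,
    };
    assert!(cache.get_non_normal(false).contains("a"));

    // The cache does not notice direct mutations of `entries`; callers must
    // force a rebuild, which is what the `force` flag is for.
    cache.entries.insert("c".to_string(), -1);
    assert!(!cache.get_non_normal(false).contains("c"));
    assert!(cache.get_non_normal(true).contains("c"));
}
```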
302 /// Both of these setters and their uses appear to be the simplest way to
310 /// Both of these setters and their uses appear to be the simplest way to
303 /// emulate a Python lazy property, but it is ugly and unidiomatic.
311 /// emulate a Python lazy property, but it is ugly and unidiomatic.
304 /// TODO One day, rewriting this struct using the typestate pattern might be a
312 /// TODO One day, rewriting this struct using the typestate pattern might be a
305 /// good idea.
313 /// good idea.
306 pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
314 pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
307 if self.all_dirs.is_none() {
315 if self.all_dirs.is_none() {
308 self.all_dirs = Some(DirsMultiset::from_dirstate(
316 self.all_dirs = Some(DirsMultiset::from_dirstate(
309 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
317 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
310 None,
318 None,
311 )?);
319 )?);
312 }
320 }
313 Ok(())
321 Ok(())
314 }
322 }
315
323
316 pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
324 pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
317 if self.dirs.is_none() {
325 if self.dirs.is_none() {
318 self.dirs = Some(DirsMultiset::from_dirstate(
326 self.dirs = Some(DirsMultiset::from_dirstate(
319 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
327 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
320 Some(EntryState::Removed),
328 Some(EntryState::Removed),
321 )?);
329 )?);
322 }
330 }
323 Ok(())
331 Ok(())
324 }
332 }
325
333
326 pub fn has_tracked_dir(
334 pub fn has_tracked_dir(
327 &mut self,
335 &mut self,
328 directory: &HgPath,
336 directory: &HgPath,
329 ) -> Result<bool, DirstateError> {
337 ) -> Result<bool, DirstateError> {
330 self.set_dirs()?;
338 self.set_dirs()?;
331 Ok(self.dirs.as_ref().unwrap().contains(directory))
339 Ok(self.dirs.as_ref().unwrap().contains(directory))
332 }
340 }
333
341
334 pub fn has_dir(
342 pub fn has_dir(
335 &mut self,
343 &mut self,
336 directory: &HgPath,
344 directory: &HgPath,
337 ) -> Result<bool, DirstateError> {
345 ) -> Result<bool, DirstateError> {
338 self.set_all_dirs()?;
346 self.set_all_dirs()?;
339 Ok(self.all_dirs.as_ref().unwrap().contains(directory))
347 Ok(self.all_dirs.as_ref().unwrap().contains(directory))
340 }
348 }
341
349
342 #[timed]
350 #[timed]
343 pub fn read(
351 pub fn read(
344 &mut self,
352 &mut self,
345 file_contents: &[u8],
353 file_contents: &[u8],
346 ) -> Result<Option<DirstateParents>, DirstateError> {
354 ) -> Result<Option<DirstateParents>, DirstateError> {
347 if file_contents.is_empty() {
355 if file_contents.is_empty() {
348 return Ok(None);
356 return Ok(None);
349 }
357 }
350
358
351 let (parents, entries, copies) = parse_dirstate(file_contents)?;
359 let (parents, entries, copies) = parse_dirstate(file_contents)?;
352 self.state_map.extend(
360 self.state_map.extend(
353 entries
361 entries
354 .into_iter()
362 .into_iter()
355 .map(|(path, entry)| (path.to_owned(), entry)),
363 .map(|(path, entry)| (path.to_owned(), entry)),
356 );
364 );
357 self.copy_map.extend(
365 self.copy_map.extend(
358 copies
366 copies
359 .into_iter()
367 .into_iter()
360 .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
368 .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
361 );
369 );
362 Ok(Some(parents.clone()))
370 Ok(Some(parents.clone()))
363 }
371 }
364
372
365 pub fn pack(
373 pub fn pack(
366 &mut self,
374 &mut self,
367 parents: DirstateParents,
375 parents: DirstateParents,
368 now: Timestamp,
376 now: Timestamp,
369 ) -> Result<Vec<u8>, DirstateError> {
377 ) -> Result<Vec<u8>, DirstateError> {
370 let packed =
378 let packed =
371 pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
379 pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
372
380
373 self.set_non_normal_other_parent_entries(true);
381 self.set_non_normal_other_parent_entries(true);
374 Ok(packed)
382 Ok(packed)
375 }
383 }
376 }
384 }
377
385
378 #[cfg(test)]
386 #[cfg(test)]
379 mod tests {
387 mod tests {
380 use super::*;
388 use super::*;
381
389
382 #[test]
390 #[test]
383 fn test_dirs_multiset() {
391 fn test_dirs_multiset() {
384 let mut map = DirstateMap::new();
392 let mut map = DirstateMap::new();
385 assert!(map.dirs.is_none());
393 assert!(map.dirs.is_none());
386 assert!(map.all_dirs.is_none());
394 assert!(map.all_dirs.is_none());
387
395
388 assert_eq!(map.has_dir(HgPath::new(b"nope")).unwrap(), false);
396 assert_eq!(map.has_dir(HgPath::new(b"nope")).unwrap(), false);
389 assert!(map.all_dirs.is_some());
397 assert!(map.all_dirs.is_some());
390 assert!(map.dirs.is_none());
398 assert!(map.dirs.is_none());
391
399
392 assert_eq!(map.has_tracked_dir(HgPath::new(b"nope")).unwrap(), false);
400 assert_eq!(map.has_tracked_dir(HgPath::new(b"nope")).unwrap(), false);
393 assert!(map.dirs.is_some());
401 assert!(map.dirs.is_some());
394 }
402 }
395
403
396 #[test]
404 #[test]
397 fn test_add_file() {
405 fn test_add_file() {
398 let mut map = DirstateMap::new();
406 let mut map = DirstateMap::new();
399
407
400 assert_eq!(0, map.len());
408 assert_eq!(0, map.len());
401
409
402 map.add_file(
410 map.add_file(
403 HgPath::new(b"meh"),
411 HgPath::new(b"meh"),
404 DirstateEntry {
412 DirstateEntry {
405 state: EntryState::Normal,
413 state: EntryState::Normal,
406 mode: 1337,
414 mode: 1337,
407 mtime: 1337,
415 mtime: 1337,
408 size: 1337,
416 size: 1337,
409 },
417 },
410 false,
418 false,
411 false,
419 false,
412 false,
420 false,
421 false,
413 )
422 )
414 .unwrap();
423 .unwrap();
415
424
416 assert_eq!(1, map.len());
425 assert_eq!(1, map.len());
417 assert_eq!(0, map.get_non_normal_other_parent_entries().0.len());
426 assert_eq!(0, map.get_non_normal_other_parent_entries().0.len());
418 assert_eq!(0, map.get_non_normal_other_parent_entries().1.len());
427 assert_eq!(0, map.get_non_normal_other_parent_entries().1.len());
419 }
428 }
420
429
421 #[test]
430 #[test]
422 fn test_non_normal_other_parent_entries() {
431 fn test_non_normal_other_parent_entries() {
423 let mut map: DirstateMap = [
432 let mut map: DirstateMap = [
424 (b"f1", (EntryState::Removed, 1337, 1337, 1337)),
433 (b"f1", (EntryState::Removed, 1337, 1337, 1337)),
425 (b"f2", (EntryState::Normal, 1337, 1337, -1)),
434 (b"f2", (EntryState::Normal, 1337, 1337, -1)),
426 (b"f3", (EntryState::Normal, 1337, 1337, 1337)),
435 (b"f3", (EntryState::Normal, 1337, 1337, 1337)),
427 (b"f4", (EntryState::Normal, 1337, -2, 1337)),
436 (b"f4", (EntryState::Normal, 1337, -2, 1337)),
428 (b"f5", (EntryState::Added, 1337, 1337, 1337)),
437 (b"f5", (EntryState::Added, 1337, 1337, 1337)),
429 (b"f6", (EntryState::Added, 1337, 1337, -1)),
438 (b"f6", (EntryState::Added, 1337, 1337, -1)),
430 (b"f7", (EntryState::Merged, 1337, 1337, -1)),
439 (b"f7", (EntryState::Merged, 1337, 1337, -1)),
431 (b"f8", (EntryState::Merged, 1337, 1337, 1337)),
440 (b"f8", (EntryState::Merged, 1337, 1337, 1337)),
432 (b"f9", (EntryState::Merged, 1337, -2, 1337)),
441 (b"f9", (EntryState::Merged, 1337, -2, 1337)),
433 (b"fa", (EntryState::Added, 1337, -2, 1337)),
442 (b"fa", (EntryState::Added, 1337, -2, 1337)),
434 (b"fb", (EntryState::Removed, 1337, -2, 1337)),
443 (b"fb", (EntryState::Removed, 1337, -2, 1337)),
435 ]
444 ]
436 .iter()
445 .iter()
437 .map(|(fname, (state, mode, size, mtime))| {
446 .map(|(fname, (state, mode, size, mtime))| {
438 (
447 (
439 HgPathBuf::from_bytes(fname.as_ref()),
448 HgPathBuf::from_bytes(fname.as_ref()),
440 DirstateEntry {
449 DirstateEntry {
441 state: *state,
450 state: *state,
442 mode: *mode,
451 mode: *mode,
443 size: *size,
452 size: *size,
444 mtime: *mtime,
453 mtime: *mtime,
445 },
454 },
446 )
455 )
447 })
456 })
448 .collect();
457 .collect();
449
458
450 let mut non_normal = [
459 let mut non_normal = [
451 b"f1", b"f2", b"f5", b"f6", b"f7", b"f8", b"f9", b"fa", b"fb",
460 b"f1", b"f2", b"f5", b"f6", b"f7", b"f8", b"f9", b"fa", b"fb",
452 ]
461 ]
453 .iter()
462 .iter()
454 .map(|x| HgPathBuf::from_bytes(x.as_ref()))
463 .map(|x| HgPathBuf::from_bytes(x.as_ref()))
455 .collect();
464 .collect();
456
465
457 let mut other_parent = HashSet::new();
466 let mut other_parent = HashSet::new();
458 other_parent.insert(HgPathBuf::from_bytes(b"f4"));
467 other_parent.insert(HgPathBuf::from_bytes(b"f4"));
459 let entries = map.get_non_normal_other_parent_entries();
468 let entries = map.get_non_normal_other_parent_entries();
460
469
461 assert_eq!(
470 assert_eq!(
462 (&mut non_normal, &mut other_parent),
471 (&mut non_normal, &mut other_parent),
463 (entries.0, entries.1)
472 (entries.0, entries.1)
464 );
473 );
465 }
474 }
466 }
475 }
@@ -1,1196 +1,1203 b''
1 use bytes_cast::BytesCast;
1 use bytes_cast::BytesCast;
2 use micro_timer::timed;
2 use micro_timer::timed;
3 use std::borrow::Cow;
3 use std::borrow::Cow;
4 use std::convert::TryInto;
4 use std::convert::TryInto;
5 use std::path::PathBuf;
5 use std::path::PathBuf;
6
6
7 use super::on_disk;
7 use super::on_disk;
8 use super::on_disk::DirstateV2ParseError;
8 use super::on_disk::DirstateV2ParseError;
9 use super::path_with_basename::WithBasename;
9 use super::path_with_basename::WithBasename;
10 use crate::dirstate::parsers::pack_entry;
10 use crate::dirstate::parsers::pack_entry;
11 use crate::dirstate::parsers::packed_entry_size;
11 use crate::dirstate::parsers::packed_entry_size;
12 use crate::dirstate::parsers::parse_dirstate_entries;
12 use crate::dirstate::parsers::parse_dirstate_entries;
13 use crate::dirstate::parsers::Timestamp;
13 use crate::dirstate::parsers::Timestamp;
14 use crate::dirstate::MTIME_UNSET;
14 use crate::dirstate::MTIME_UNSET;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
16 use crate::dirstate::SIZE_NON_NORMAL;
16 use crate::dirstate::SIZE_NON_NORMAL;
17 use crate::dirstate::V1_RANGEMASK;
17 use crate::dirstate::V1_RANGEMASK;
18 use crate::matchers::Matcher;
18 use crate::matchers::Matcher;
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
20 use crate::CopyMapIter;
20 use crate::CopyMapIter;
21 use crate::DirstateEntry;
21 use crate::DirstateEntry;
22 use crate::DirstateError;
22 use crate::DirstateError;
23 use crate::DirstateParents;
23 use crate::DirstateParents;
24 use crate::DirstateStatus;
24 use crate::DirstateStatus;
25 use crate::EntryState;
25 use crate::EntryState;
26 use crate::FastHashMap;
26 use crate::FastHashMap;
27 use crate::PatternFileWarning;
27 use crate::PatternFileWarning;
28 use crate::StateMapIter;
28 use crate::StateMapIter;
29 use crate::StatusError;
29 use crate::StatusError;
30 use crate::StatusOptions;
30 use crate::StatusOptions;
31
31
32 pub struct DirstateMap<'on_disk> {
32 pub struct DirstateMap<'on_disk> {
33 /// Contents of the `.hg/dirstate` file
33 /// Contents of the `.hg/dirstate` file
34 pub(super) on_disk: &'on_disk [u8],
34 pub(super) on_disk: &'on_disk [u8],
35
35
36 pub(super) root: ChildNodes<'on_disk>,
36 pub(super) root: ChildNodes<'on_disk>,
37
37
38 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
38 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
39 pub(super) nodes_with_entry_count: u32,
39 pub(super) nodes_with_entry_count: u32,
40
40
41 /// Number of nodes anywhere in the tree that have
41 /// Number of nodes anywhere in the tree that have
42 /// `.copy_source.is_some()`.
42 /// `.copy_source.is_some()`.
43 pub(super) nodes_with_copy_source_count: u32,
43 pub(super) nodes_with_copy_source_count: u32,
44
44
45 /// See on_disk::Header
45 /// See on_disk::Header
46 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
46 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
47 }
47 }
48
48
49 /// Using a plain `HgPathBuf` of the full path from the repository root as a
49 /// Using a plain `HgPathBuf` of the full path from the repository root as a
50 /// map key would also work: all paths in a given map have the same parent
50 /// map key would also work: all paths in a given map have the same parent
51 /// path, so comparing full paths gives the same result as comparing base
51 /// path, so comparing full paths gives the same result as comparing base
52 /// names. However `HashMap` would waste time always re-hashing the same
52 /// names. However `HashMap` would waste time always re-hashing the same
53 /// string prefix.
53 /// string prefix.
54 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
54 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
55
55
56 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
56 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
57 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
57 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
58 pub(super) enum BorrowedPath<'tree, 'on_disk> {
58 pub(super) enum BorrowedPath<'tree, 'on_disk> {
59 InMemory(&'tree HgPathBuf),
59 InMemory(&'tree HgPathBuf),
60 OnDisk(&'on_disk HgPath),
60 OnDisk(&'on_disk HgPath),
61 }
61 }
62
62
63 pub(super) enum ChildNodes<'on_disk> {
63 pub(super) enum ChildNodes<'on_disk> {
64 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
64 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
65 OnDisk(&'on_disk [on_disk::Node]),
65 OnDisk(&'on_disk [on_disk::Node]),
66 }
66 }
67
67
68 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
68 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
69 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
69 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
70 OnDisk(&'on_disk [on_disk::Node]),
70 OnDisk(&'on_disk [on_disk::Node]),
71 }
71 }
72
72
73 pub(super) enum NodeRef<'tree, 'on_disk> {
73 pub(super) enum NodeRef<'tree, 'on_disk> {
74 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
74 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
75 OnDisk(&'on_disk on_disk::Node),
75 OnDisk(&'on_disk on_disk::Node),
76 }
76 }
77
77
78 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
78 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
79 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
79 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
80 match *self {
80 match *self {
81 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
81 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
82 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
82 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
83 }
83 }
84 }
84 }
85 }
85 }
86
86
87 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
87 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
88 type Target = HgPath;
88 type Target = HgPath;
89
89
90 fn deref(&self) -> &HgPath {
90 fn deref(&self) -> &HgPath {
91 match *self {
91 match *self {
92 BorrowedPath::InMemory(in_memory) => in_memory,
92 BorrowedPath::InMemory(in_memory) => in_memory,
93 BorrowedPath::OnDisk(on_disk) => on_disk,
93 BorrowedPath::OnDisk(on_disk) => on_disk,
94 }
94 }
95 }
95 }
96 }
96 }
97
97
98 impl Default for ChildNodes<'_> {
98 impl Default for ChildNodes<'_> {
99 fn default() -> Self {
99 fn default() -> Self {
100 ChildNodes::InMemory(Default::default())
100 ChildNodes::InMemory(Default::default())
101 }
101 }
102 }
102 }
103
103
104 impl<'on_disk> ChildNodes<'on_disk> {
104 impl<'on_disk> ChildNodes<'on_disk> {
105 pub(super) fn as_ref<'tree>(
105 pub(super) fn as_ref<'tree>(
106 &'tree self,
106 &'tree self,
107 ) -> ChildNodesRef<'tree, 'on_disk> {
107 ) -> ChildNodesRef<'tree, 'on_disk> {
108 match self {
108 match self {
109 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
109 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
110 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
110 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
111 }
111 }
112 }
112 }
113
113
114 pub(super) fn is_empty(&self) -> bool {
114 pub(super) fn is_empty(&self) -> bool {
115 match self {
115 match self {
116 ChildNodes::InMemory(nodes) => nodes.is_empty(),
116 ChildNodes::InMemory(nodes) => nodes.is_empty(),
117 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
117 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
118 }
118 }
119 }
119 }
120
120
121 pub(super) fn make_mut(
121 pub(super) fn make_mut(
122 &mut self,
122 &mut self,
123 on_disk: &'on_disk [u8],
123 on_disk: &'on_disk [u8],
124 ) -> Result<
124 ) -> Result<
125 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
125 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
126 DirstateV2ParseError,
126 DirstateV2ParseError,
127 > {
127 > {
128 match self {
128 match self {
129 ChildNodes::InMemory(nodes) => Ok(nodes),
129 ChildNodes::InMemory(nodes) => Ok(nodes),
130 ChildNodes::OnDisk(nodes) => {
130 ChildNodes::OnDisk(nodes) => {
131 let nodes = nodes
131 let nodes = nodes
132 .iter()
132 .iter()
133 .map(|node| {
133 .map(|node| {
134 Ok((
134 Ok((
135 node.path(on_disk)?,
135 node.path(on_disk)?,
136 node.to_in_memory_node(on_disk)?,
136 node.to_in_memory_node(on_disk)?,
137 ))
137 ))
138 })
138 })
139 .collect::<Result<_, _>>()?;
139 .collect::<Result<_, _>>()?;
140 *self = ChildNodes::InMemory(nodes);
140 *self = ChildNodes::InMemory(nodes);
141 match self {
141 match self {
142 ChildNodes::InMemory(nodes) => Ok(nodes),
142 ChildNodes::InMemory(nodes) => Ok(nodes),
143 ChildNodes::OnDisk(_) => unreachable!(),
143 ChildNodes::OnDisk(_) => unreachable!(),
144 }
144 }
145 }
145 }
146 }
146 }
147 }
147 }
148 }
148 }
149
149
150 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
150 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
151 pub(super) fn get(
151 pub(super) fn get(
152 &self,
152 &self,
153 base_name: &HgPath,
153 base_name: &HgPath,
154 on_disk: &'on_disk [u8],
154 on_disk: &'on_disk [u8],
155 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
155 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
156 match self {
156 match self {
157 ChildNodesRef::InMemory(nodes) => Ok(nodes
157 ChildNodesRef::InMemory(nodes) => Ok(nodes
158 .get_key_value(base_name)
158 .get_key_value(base_name)
159 .map(|(k, v)| NodeRef::InMemory(k, v))),
159 .map(|(k, v)| NodeRef::InMemory(k, v))),
160 ChildNodesRef::OnDisk(nodes) => {
160 ChildNodesRef::OnDisk(nodes) => {
161 let mut parse_result = Ok(());
161 let mut parse_result = Ok(());
162 let search_result = nodes.binary_search_by(|node| {
162 let search_result = nodes.binary_search_by(|node| {
163 match node.base_name(on_disk) {
163 match node.base_name(on_disk) {
164 Ok(node_base_name) => node_base_name.cmp(base_name),
164 Ok(node_base_name) => node_base_name.cmp(base_name),
165 Err(e) => {
165 Err(e) => {
166 parse_result = Err(e);
166 parse_result = Err(e);
167 // Dummy comparison result, `search_result` won’t
167 // Dummy comparison result, `search_result` won’t
168 // be used since `parse_result` is an error
168 // be used since `parse_result` is an error
169 std::cmp::Ordering::Equal
169 std::cmp::Ordering::Equal
170 }
170 }
171 }
171 }
172 });
172 });
173 parse_result.map(|()| {
173 parse_result.map(|()| {
174 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
174 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
175 })
175 })
176 }
176 }
177 }
177 }
178 }
178 }
179
179
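The `get` implementation above works around the fact that `slice::binary_search_by` cannot return an error from its comparison closure: a parse failure is stashed in a local `Result` and a dummy `Ordering` is returned, then the search result is only used if no error was recorded. Here is a self-contained sketch of the same trick, using UTF-8 decoding as a stand-in for the fallible `base_name` lookup; the function and data are made up for illustration.

```rust
// Find `wanted` among sorted byte-string "nodes" whose names may fail to decode.
fn find(nodes: &[&[u8]], wanted: &str) -> Result<Option<usize>, std::str::Utf8Error> {
    let mut parse_result = Ok(());
    let search_result = nodes.binary_search_by(|node| {
        match std::str::from_utf8(node) {
            Ok(name) => name.cmp(wanted),
            Err(e) => {
                parse_result = Err(e);
                // Dummy ordering; the search result is discarded whenever
                // `parse_result` holds an error.
                std::cmp::Ordering::Equal
            }
        }
    });
    parse_result.map(|()| search_result.ok())
}

fn main() {
    let a: &[u8] = b"alpha";
    let b: &[u8] = b"beta";
    let c: &[u8] = b"gamma";
    let nodes = [a, b, c];
    assert_eq!(find(&nodes, "beta").unwrap(), Some(1));
    assert_eq!(find(&nodes, "delta").unwrap(), None);

    let bad: &[u8] = &[0xff, 0xfe]; // not valid UTF-8
    assert!(find(&[bad], "anything").is_err());
}
```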
180 /// Iterate in undefined order
180 /// Iterate in undefined order
181 pub(super) fn iter(
181 pub(super) fn iter(
182 &self,
182 &self,
183 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
183 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
184 match self {
184 match self {
185 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
185 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
186 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
186 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
187 ),
187 ),
188 ChildNodesRef::OnDisk(nodes) => {
188 ChildNodesRef::OnDisk(nodes) => {
189 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
189 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
190 }
190 }
191 }
191 }
192 }
192 }
193
193
194 /// Iterate in parallel in undefined order
194 /// Iterate in parallel in undefined order
195 pub(super) fn par_iter(
195 pub(super) fn par_iter(
196 &self,
196 &self,
197 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
197 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
198 {
198 {
199 use rayon::prelude::*;
199 use rayon::prelude::*;
200 match self {
200 match self {
201 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
201 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
202 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
202 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
203 ),
203 ),
204 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
204 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
205 nodes.par_iter().map(NodeRef::OnDisk),
205 nodes.par_iter().map(NodeRef::OnDisk),
206 ),
206 ),
207 }
207 }
208 }
208 }
209
209
210 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
210 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
211 match self {
211 match self {
212 ChildNodesRef::InMemory(nodes) => {
212 ChildNodesRef::InMemory(nodes) => {
213 let mut vec: Vec<_> = nodes
213 let mut vec: Vec<_> = nodes
214 .iter()
214 .iter()
215 .map(|(k, v)| NodeRef::InMemory(k, v))
215 .map(|(k, v)| NodeRef::InMemory(k, v))
216 .collect();
216 .collect();
217 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
217 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
218 match node {
218 match node {
219 NodeRef::InMemory(path, _node) => path.base_name(),
219 NodeRef::InMemory(path, _node) => path.base_name(),
220 NodeRef::OnDisk(_) => unreachable!(),
220 NodeRef::OnDisk(_) => unreachable!(),
221 }
221 }
222 }
222 }
223 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
223 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
224 // value: https://github.com/rust-lang/rust/issues/34162
224 // value: https://github.com/rust-lang/rust/issues/34162
225 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
225 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
226 vec
226 vec
227 }
227 }
228 ChildNodesRef::OnDisk(nodes) => {
228 ChildNodesRef::OnDisk(nodes) => {
229 // Nodes on disk are already sorted
229 // Nodes on disk are already sorted
230 nodes.iter().map(NodeRef::OnDisk).collect()
230 nodes.iter().map(NodeRef::OnDisk).collect()
231 }
231 }
232 }
232 }
233 }
233 }
234 }
234 }
235
235
236 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
236 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
237 pub(super) fn full_path(
237 pub(super) fn full_path(
238 &self,
238 &self,
239 on_disk: &'on_disk [u8],
239 on_disk: &'on_disk [u8],
240 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
240 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
241 match self {
241 match self {
242 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
242 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
243 NodeRef::OnDisk(node) => node.full_path(on_disk),
243 NodeRef::OnDisk(node) => node.full_path(on_disk),
244 }
244 }
245 }
245 }
246
246
247 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
247 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
248 /// HgPath>` detached from `'tree`
248 /// HgPath>` detached from `'tree`
249 pub(super) fn full_path_borrowed(
249 pub(super) fn full_path_borrowed(
250 &self,
250 &self,
251 on_disk: &'on_disk [u8],
251 on_disk: &'on_disk [u8],
252 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
252 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
253 match self {
253 match self {
254 NodeRef::InMemory(path, _node) => match path.full_path() {
254 NodeRef::InMemory(path, _node) => match path.full_path() {
255 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
255 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
256 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
256 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
257 },
257 },
258 NodeRef::OnDisk(node) => {
258 NodeRef::OnDisk(node) => {
259 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
259 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
260 }
260 }
261 }
261 }
262 }
262 }
263
263
264 pub(super) fn base_name(
264 pub(super) fn base_name(
265 &self,
265 &self,
266 on_disk: &'on_disk [u8],
266 on_disk: &'on_disk [u8],
267 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
267 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
268 match self {
268 match self {
269 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
269 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
270 NodeRef::OnDisk(node) => node.base_name(on_disk),
270 NodeRef::OnDisk(node) => node.base_name(on_disk),
271 }
271 }
272 }
272 }
273
273
274 pub(super) fn children(
274 pub(super) fn children(
275 &self,
275 &self,
276 on_disk: &'on_disk [u8],
276 on_disk: &'on_disk [u8],
277 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
277 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
278 match self {
278 match self {
279 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
279 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
280 NodeRef::OnDisk(node) => {
280 NodeRef::OnDisk(node) => {
281 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
281 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
282 }
282 }
283 }
283 }
284 }
284 }
285
285
286 pub(super) fn has_copy_source(&self) -> bool {
286 pub(super) fn has_copy_source(&self) -> bool {
287 match self {
287 match self {
288 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
288 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
289 NodeRef::OnDisk(node) => node.has_copy_source(),
289 NodeRef::OnDisk(node) => node.has_copy_source(),
290 }
290 }
291 }
291 }
292
292
293 pub(super) fn copy_source(
293 pub(super) fn copy_source(
294 &self,
294 &self,
295 on_disk: &'on_disk [u8],
295 on_disk: &'on_disk [u8],
296 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
296 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
297 match self {
297 match self {
298 NodeRef::InMemory(_path, node) => {
298 NodeRef::InMemory(_path, node) => {
299 Ok(node.copy_source.as_ref().map(|s| &**s))
299 Ok(node.copy_source.as_ref().map(|s| &**s))
300 }
300 }
301 NodeRef::OnDisk(node) => node.copy_source(on_disk),
301 NodeRef::OnDisk(node) => node.copy_source(on_disk),
302 }
302 }
303 }
303 }
304
304
305 pub(super) fn entry(
305 pub(super) fn entry(
306 &self,
306 &self,
307 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
307 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
308 match self {
308 match self {
309 NodeRef::InMemory(_path, node) => {
309 NodeRef::InMemory(_path, node) => {
310 Ok(node.data.as_entry().copied())
310 Ok(node.data.as_entry().copied())
311 }
311 }
312 NodeRef::OnDisk(node) => node.entry(),
312 NodeRef::OnDisk(node) => node.entry(),
313 }
313 }
314 }
314 }
315
315
316 pub(super) fn state(
316 pub(super) fn state(
317 &self,
317 &self,
318 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
318 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
319 match self {
319 match self {
320 NodeRef::InMemory(_path, node) => {
320 NodeRef::InMemory(_path, node) => {
321 Ok(node.data.as_entry().map(|entry| entry.state))
321 Ok(node.data.as_entry().map(|entry| entry.state))
322 }
322 }
323 NodeRef::OnDisk(node) => node.state(),
323 NodeRef::OnDisk(node) => node.state(),
324 }
324 }
325 }
325 }
326
326
327 pub(super) fn cached_directory_mtime(
327 pub(super) fn cached_directory_mtime(
328 &self,
328 &self,
329 ) -> Option<&'tree on_disk::Timestamp> {
329 ) -> Option<&'tree on_disk::Timestamp> {
330 match self {
330 match self {
331 NodeRef::InMemory(_path, node) => match &node.data {
331 NodeRef::InMemory(_path, node) => match &node.data {
332 NodeData::CachedDirectory { mtime } => Some(mtime),
332 NodeData::CachedDirectory { mtime } => Some(mtime),
333 _ => None,
333 _ => None,
334 },
334 },
335 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
335 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
336 }
336 }
337 }
337 }
338
338
339 pub(super) fn descendants_with_entry_count(&self) -> u32 {
339 pub(super) fn descendants_with_entry_count(&self) -> u32 {
340 match self {
340 match self {
341 NodeRef::InMemory(_path, node) => {
341 NodeRef::InMemory(_path, node) => {
342 node.descendants_with_entry_count
342 node.descendants_with_entry_count
343 }
343 }
344 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
344 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
345 }
345 }
346 }
346 }
347
347
348 pub(super) fn tracked_descendants_count(&self) -> u32 {
348 pub(super) fn tracked_descendants_count(&self) -> u32 {
349 match self {
349 match self {
350 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
350 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
351 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
351 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
352 }
352 }
353 }
353 }
354 }
354 }
355
355
356 /// Represents a file or a directory
356 /// Represents a file or a directory
357 #[derive(Default)]
357 #[derive(Default)]
358 pub(super) struct Node<'on_disk> {
358 pub(super) struct Node<'on_disk> {
359 pub(super) data: NodeData,
359 pub(super) data: NodeData,
360
360
361 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
361 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
362
362
363 pub(super) children: ChildNodes<'on_disk>,
363 pub(super) children: ChildNodes<'on_disk>,
364
364
365 /// How many (non-inclusive) descendants of this node have an entry.
365 /// How many (non-inclusive) descendants of this node have an entry.
366 pub(super) descendants_with_entry_count: u32,
366 pub(super) descendants_with_entry_count: u32,
367
367
368 /// How many (non-inclusive) descendants of this node have an entry whose
368 /// How many (non-inclusive) descendants of this node have an entry whose
369 /// state is "tracked".
369 /// state is "tracked".
370 pub(super) tracked_descendants_count: u32,
370 pub(super) tracked_descendants_count: u32,
371 }
371 }
372
372
373 pub(super) enum NodeData {
373 pub(super) enum NodeData {
374 Entry(DirstateEntry),
374 Entry(DirstateEntry),
375 CachedDirectory { mtime: on_disk::Timestamp },
375 CachedDirectory { mtime: on_disk::Timestamp },
376 None,
376 None,
377 }
377 }
378
378
379 impl Default for NodeData {
379 impl Default for NodeData {
380 fn default() -> Self {
380 fn default() -> Self {
381 NodeData::None
381 NodeData::None
382 }
382 }
383 }
383 }
384
384
385 impl NodeData {
385 impl NodeData {
386 fn has_entry(&self) -> bool {
386 fn has_entry(&self) -> bool {
387 match self {
387 match self {
388 NodeData::Entry(_) => true,
388 NodeData::Entry(_) => true,
389 _ => false,
389 _ => false,
390 }
390 }
391 }
391 }
392
392
393 fn as_entry(&self) -> Option<&DirstateEntry> {
393 fn as_entry(&self) -> Option<&DirstateEntry> {
394 match self {
394 match self {
395 NodeData::Entry(entry) => Some(entry),
395 NodeData::Entry(entry) => Some(entry),
396 _ => None,
396 _ => None,
397 }
397 }
398 }
398 }
399 }
399 }
400
400
401 impl<'on_disk> DirstateMap<'on_disk> {
401 impl<'on_disk> DirstateMap<'on_disk> {
402 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
402 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
403 Self {
403 Self {
404 on_disk,
404 on_disk,
405 root: ChildNodes::default(),
405 root: ChildNodes::default(),
406 nodes_with_entry_count: 0,
406 nodes_with_entry_count: 0,
407 nodes_with_copy_source_count: 0,
407 nodes_with_copy_source_count: 0,
408 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
408 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
409 }
409 }
410 }
410 }
411
411
412 #[timed]
412 #[timed]
413 pub fn new_v2(
413 pub fn new_v2(
414 on_disk: &'on_disk [u8],
414 on_disk: &'on_disk [u8],
415 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
415 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
416 Ok(on_disk::read(on_disk)?)
416 Ok(on_disk::read(on_disk)?)
417 }
417 }
418
418
419 #[timed]
419 #[timed]
420 pub fn new_v1(
420 pub fn new_v1(
421 on_disk: &'on_disk [u8],
421 on_disk: &'on_disk [u8],
422 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
422 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
423 let mut map = Self::empty(on_disk);
423 let mut map = Self::empty(on_disk);
424 if map.on_disk.is_empty() {
424 if map.on_disk.is_empty() {
425 return Ok((map, None));
425 return Ok((map, None));
426 }
426 }
427
427
428 let parents = parse_dirstate_entries(
428 let parents = parse_dirstate_entries(
429 map.on_disk,
429 map.on_disk,
430 |path, entry, copy_source| {
430 |path, entry, copy_source| {
431 let tracked = entry.state.is_tracked();
431 let tracked = entry.state.is_tracked();
432 let node = Self::get_or_insert_node(
432 let node = Self::get_or_insert_node(
433 map.on_disk,
433 map.on_disk,
434 &mut map.root,
434 &mut map.root,
435 path,
435 path,
436 WithBasename::to_cow_borrowed,
436 WithBasename::to_cow_borrowed,
437 |ancestor| {
437 |ancestor| {
438 if tracked {
438 if tracked {
439 ancestor.tracked_descendants_count += 1
439 ancestor.tracked_descendants_count += 1
440 }
440 }
441 ancestor.descendants_with_entry_count += 1
441 ancestor.descendants_with_entry_count += 1
442 },
442 },
443 )?;
443 )?;
444 assert!(
444 assert!(
445 !node.data.has_entry(),
445 !node.data.has_entry(),
446 "duplicate dirstate entry in read"
446 "duplicate dirstate entry in read"
447 );
447 );
448 assert!(
448 assert!(
449 node.copy_source.is_none(),
449 node.copy_source.is_none(),
450 "duplicate dirstate entry in read"
450 "duplicate dirstate entry in read"
451 );
451 );
452 node.data = NodeData::Entry(*entry);
452 node.data = NodeData::Entry(*entry);
453 node.copy_source = copy_source.map(Cow::Borrowed);
453 node.copy_source = copy_source.map(Cow::Borrowed);
454 map.nodes_with_entry_count += 1;
454 map.nodes_with_entry_count += 1;
455 if copy_source.is_some() {
455 if copy_source.is_some() {
456 map.nodes_with_copy_source_count += 1
456 map.nodes_with_copy_source_count += 1
457 }
457 }
458 Ok(())
458 Ok(())
459 },
459 },
460 )?;
460 )?;
461 let parents = Some(parents.clone());
461 let parents = Some(parents.clone());
462
462
463 Ok((map, parents))
463 Ok((map, parents))
464 }
464 }
465
465
466 fn get_node<'tree>(
466 fn get_node<'tree>(
467 &'tree self,
467 &'tree self,
468 path: &HgPath,
468 path: &HgPath,
469 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
469 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
470 let mut children = self.root.as_ref();
470 let mut children = self.root.as_ref();
471 let mut components = path.components();
471 let mut components = path.components();
472 let mut component =
472 let mut component =
473 components.next().expect("expected at least one component");
473 components.next().expect("expected at least one component");
474 loop {
474 loop {
475 if let Some(child) = children.get(component, self.on_disk)? {
475 if let Some(child) = children.get(component, self.on_disk)? {
476 if let Some(next_component) = components.next() {
476 if let Some(next_component) = components.next() {
477 component = next_component;
477 component = next_component;
478 children = child.children(self.on_disk)?;
478 children = child.children(self.on_disk)?;
479 } else {
479 } else {
480 return Ok(Some(child));
480 return Ok(Some(child));
481 }
481 }
482 } else {
482 } else {
483 return Ok(None);
483 return Ok(None);
484 }
484 }
485 }
485 }
486 }
486 }
487
487
488 /// Returns a mutable reference to the node at `path` if it exists
488 /// Returns a mutable reference to the node at `path` if it exists
489 ///
489 ///
490 /// This takes `root` instead of `&mut self` so that callers can mutate
490 /// This takes `root` instead of `&mut self` so that callers can mutate
491 /// other fields while the returned borrow is still valid
491 /// other fields while the returned borrow is still valid
492 fn get_node_mut<'tree>(
492 fn get_node_mut<'tree>(
493 on_disk: &'on_disk [u8],
493 on_disk: &'on_disk [u8],
494 root: &'tree mut ChildNodes<'on_disk>,
494 root: &'tree mut ChildNodes<'on_disk>,
495 path: &HgPath,
495 path: &HgPath,
496 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
496 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
497 let mut children = root;
497 let mut children = root;
498 let mut components = path.components();
498 let mut components = path.components();
499 let mut component =
499 let mut component =
500 components.next().expect("expected at least one component");
500 components.next().expect("expected at least one component");
501 loop {
501 loop {
502 if let Some(child) = children.make_mut(on_disk)?.get_mut(component)
502 if let Some(child) = children.make_mut(on_disk)?.get_mut(component)
503 {
503 {
504 if let Some(next_component) = components.next() {
504 if let Some(next_component) = components.next() {
505 component = next_component;
505 component = next_component;
506 children = &mut child.children;
506 children = &mut child.children;
507 } else {
507 } else {
508 return Ok(Some(child));
508 return Ok(Some(child));
509 }
509 }
510 } else {
510 } else {
511 return Ok(None);
511 return Ok(None);
512 }
512 }
513 }
513 }
514 }
514 }
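The doc comment above points at a borrow-splitting pattern: because `get_node_mut` borrows only `root` (plus the immutable `on_disk` buffer) rather than `&mut self`, a caller can keep updating sibling fields while still holding the returned node. A minimal, self-contained sketch of that pattern; the `Map`/`Node` names and fields are illustrative stand-ins, not the real types from this file.

```
// Hedged sketch: an associated function that borrows only one field leaves
// the other fields free to mutate while the returned `&mut Node` is alive.
use std::collections::HashMap;

struct Node {
    tracked: bool,
}

struct Map {
    root: HashMap<String, Node>,
    tracked_count: u32,
}

impl Map {
    // Borrows `root` alone, not the whole `Map`.
    fn get_node_mut<'tree>(
        root: &'tree mut HashMap<String, Node>,
        name: &str,
    ) -> Option<&'tree mut Node> {
        root.get_mut(name)
    }

    fn track(&mut self, name: &str) {
        if let Some(node) = Self::get_node_mut(&mut self.root, name) {
            // The `node` borrow (of `self.root`) is still live here, yet the
            // disjoint field `self.tracked_count` can be updated.
            self.tracked_count += 1;
            node.tracked = true;
        }
    }
}
```

Had `get_node_mut` taken `&mut self` instead, the returned borrow would lock all of `self`, and the counter update above would not compile.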
515
515
516 pub(super) fn get_or_insert<'tree, 'path>(
516 pub(super) fn get_or_insert<'tree, 'path>(
517 &'tree mut self,
517 &'tree mut self,
518 path: &HgPath,
518 path: &HgPath,
519 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
519 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
520 Self::get_or_insert_node(
520 Self::get_or_insert_node(
521 self.on_disk,
521 self.on_disk,
522 &mut self.root,
522 &mut self.root,
523 path,
523 path,
524 WithBasename::to_cow_owned,
524 WithBasename::to_cow_owned,
525 |_| {},
525 |_| {},
526 )
526 )
527 }
527 }
528
528
529 pub(super) fn get_or_insert_node<'tree, 'path>(
529 pub(super) fn get_or_insert_node<'tree, 'path>(
530 on_disk: &'on_disk [u8],
530 on_disk: &'on_disk [u8],
531 root: &'tree mut ChildNodes<'on_disk>,
531 root: &'tree mut ChildNodes<'on_disk>,
532 path: &'path HgPath,
532 path: &'path HgPath,
533 to_cow: impl Fn(
533 to_cow: impl Fn(
534 WithBasename<&'path HgPath>,
534 WithBasename<&'path HgPath>,
535 ) -> WithBasename<Cow<'on_disk, HgPath>>,
535 ) -> WithBasename<Cow<'on_disk, HgPath>>,
536 mut each_ancestor: impl FnMut(&mut Node),
536 mut each_ancestor: impl FnMut(&mut Node),
537 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
537 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
538 let mut child_nodes = root;
538 let mut child_nodes = root;
539 let mut inclusive_ancestor_paths =
539 let mut inclusive_ancestor_paths =
540 WithBasename::inclusive_ancestors_of(path);
540 WithBasename::inclusive_ancestors_of(path);
541 let mut ancestor_path = inclusive_ancestor_paths
541 let mut ancestor_path = inclusive_ancestor_paths
542 .next()
542 .next()
543 .expect("expected at least one inclusive ancestor");
543 .expect("expected at least one inclusive ancestor");
544 loop {
544 loop {
545 // TODO: can we avoid allocating an owned key in cases where the
545 // TODO: can we avoid allocating an owned key in cases where the
546 // map already contains that key, without introducing double
546 // map already contains that key, without introducing double
547 // lookup?
547 // lookup?
548 let child_node = child_nodes
548 let child_node = child_nodes
549 .make_mut(on_disk)?
549 .make_mut(on_disk)?
550 .entry(to_cow(ancestor_path))
550 .entry(to_cow(ancestor_path))
551 .or_default();
551 .or_default();
552 if let Some(next) = inclusive_ancestor_paths.next() {
552 if let Some(next) = inclusive_ancestor_paths.next() {
553 each_ancestor(child_node);
553 each_ancestor(child_node);
554 ancestor_path = next;
554 ancestor_path = next;
555 child_nodes = &mut child_node.children;
555 child_nodes = &mut child_node.children;
556 } else {
556 } else {
557 return Ok(child_node);
557 return Ok(child_node);
558 }
558 }
559 }
559 }
560 }
560 }
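The TODO above refers to a general property of map entry APIs: `entry()` takes its key by value, so an owned key is built even when the key is already present, while avoiding that allocation costs extra lookups. A small illustration of the trade-off with a plain `HashMap` (illustrative only; the real `ChildNodes` type has its own accessors):

```
use std::collections::HashMap;

// Single lookup, but always allocates an owned `String`, even when the key
// is already in the map.
fn get_or_insert_entry<'m>(
    map: &'m mut HashMap<String, u32>,
    key: &str,
) -> &'m mut u32 {
    map.entry(key.to_owned()).or_insert(0)
}

// No allocation when the key already exists, at the cost of additional hash
// lookups.
fn get_or_insert_double_lookup<'m>(
    map: &'m mut HashMap<String, u32>,
    key: &str,
) -> &'m mut u32 {
    if !map.contains_key(key) {
        map.insert(key.to_owned(), 0);
    }
    map.get_mut(key).expect("just inserted or already present")
}
```

Which side wins depends on how often the key is already present and how expensive building the owned key is, which is why the comment leaves it as an open question.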
561
561
562 fn add_or_remove_file(
562 fn add_or_remove_file(
563 &mut self,
563 &mut self,
564 path: &HgPath,
564 path: &HgPath,
565 old_state: EntryState,
565 old_state: EntryState,
566 new_entry: DirstateEntry,
566 new_entry: DirstateEntry,
567 ) -> Result<(), DirstateV2ParseError> {
567 ) -> Result<(), DirstateV2ParseError> {
568 let had_entry = old_state != EntryState::Unknown;
568 let had_entry = old_state != EntryState::Unknown;
569 let tracked_count_increment =
569 let tracked_count_increment =
570 match (old_state.is_tracked(), new_entry.state.is_tracked()) {
570 match (old_state.is_tracked(), new_entry.state.is_tracked()) {
571 (false, true) => 1,
571 (false, true) => 1,
572 (true, false) => -1,
572 (true, false) => -1,
573 _ => 0,
573 _ => 0,
574 };
574 };
575
575
576 let node = Self::get_or_insert_node(
576 let node = Self::get_or_insert_node(
577 self.on_disk,
577 self.on_disk,
578 &mut self.root,
578 &mut self.root,
579 path,
579 path,
580 WithBasename::to_cow_owned,
580 WithBasename::to_cow_owned,
581 |ancestor| {
581 |ancestor| {
582 if !had_entry {
582 if !had_entry {
583 ancestor.descendants_with_entry_count += 1;
583 ancestor.descendants_with_entry_count += 1;
584 }
584 }
585
585
586 // We can’t use `+= increment` because the counter is unsigned,
586 // We can’t use `+= increment` because the counter is unsigned,
587 // and we want debug builds to detect accidental underflow
587 // and we want debug builds to detect accidental underflow
588 // through zero
588 // through zero
589 match tracked_count_increment {
589 match tracked_count_increment {
590 1 => ancestor.tracked_descendants_count += 1,
590 1 => ancestor.tracked_descendants_count += 1,
591 -1 => ancestor.tracked_descendants_count -= 1,
591 -1 => ancestor.tracked_descendants_count -= 1,
592 _ => {}
592 _ => {}
593 }
593 }
594 },
594 },
595 )?;
595 )?;
596 if !had_entry {
596 if !had_entry {
597 self.nodes_with_entry_count += 1
597 self.nodes_with_entry_count += 1
598 }
598 }
599 node.data = NodeData::Entry(new_entry);
599 node.data = NodeData::Entry(new_entry);
600 Ok(())
600 Ok(())
601 }
601 }
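The closure above leans on the fact that unsigned integer arithmetic is overflow-checked in debug builds: keeping the decrement as a plain `-= 1` means an accidental underflow panics instead of wrapping. A minimal sketch of the same idea in isolation (the function name is made up):

```
// Apply a signed adjustment to an unsigned counter without hiding bugs.
fn adjust_count(counter: &mut u32, increment: i32) {
    match increment {
        1 => *counter += 1,
        // Panics in debug builds if `*counter` is already 0, which is
        // exactly the accidental underflow we want to detect.
        -1 => *counter -= 1,
        _ => {}
    }
}
```

A cast-based update such as `*counter = (*counter as i64 + increment as i64) as u32` would instead wrap silently in every build.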
602
602
603 fn iter_nodes<'tree>(
603 fn iter_nodes<'tree>(
604 &'tree self,
604 &'tree self,
605 ) -> impl Iterator<
605 ) -> impl Iterator<
606 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
606 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
607 > + 'tree {
607 > + 'tree {
608 // Depth first tree traversal.
608 // Depth first tree traversal.
609 //
609 //
610 // If we could afford internal iteration and recursion,
610 // If we could afford internal iteration and recursion,
611 // this would look like:
611 // this would look like:
612 //
612 //
613 // ```
613 // ```
614 // fn traverse_children(
614 // fn traverse_children(
615 // children: &ChildNodes,
615 // children: &ChildNodes,
616 // each: &mut impl FnMut(&Node),
616 // each: &mut impl FnMut(&Node),
617 // ) {
617 // ) {
618 // for child in children.values() {
618 // for child in children.values() {
619 // traverse_children(&child.children, each);
619 // traverse_children(&child.children, each);
620 // each(child);
620 // each(child);
621 // }
621 // }
622 // }
622 // }
623 // ```
623 // ```
624 //
624 //
625 // However we want an external iterator and therefore can’t use the
625 // However we want an external iterator and therefore can’t use the
626 // call stack. Use an explicit stack instead:
626 // call stack. Use an explicit stack instead:
627 let mut stack = Vec::new();
627 let mut stack = Vec::new();
628 let mut iter = self.root.as_ref().iter();
628 let mut iter = self.root.as_ref().iter();
629 std::iter::from_fn(move || {
629 std::iter::from_fn(move || {
630 while let Some(child_node) = iter.next() {
630 while let Some(child_node) = iter.next() {
631 let children = match child_node.children(self.on_disk) {
631 let children = match child_node.children(self.on_disk) {
632 Ok(children) => children,
632 Ok(children) => children,
633 Err(error) => return Some(Err(error)),
633 Err(error) => return Some(Err(error)),
634 };
634 };
635 // Pseudo-recursion
635 // Pseudo-recursion
636 let new_iter = children.iter();
636 let new_iter = children.iter();
637 let old_iter = std::mem::replace(&mut iter, new_iter);
637 let old_iter = std::mem::replace(&mut iter, new_iter);
638 stack.push((child_node, old_iter));
638 stack.push((child_node, old_iter));
639 }
639 }
640 // Found the end of a `children.iter()` iterator.
640 // Found the end of a `children.iter()` iterator.
641 if let Some((child_node, next_iter)) = stack.pop() {
641 if let Some((child_node, next_iter)) = stack.pop() {
642 // "Return" from pseudo-recursion by restoring state from the
642 // "Return" from pseudo-recursion by restoring state from the
643 // explicit stack
643 // explicit stack
644 iter = next_iter;
644 iter = next_iter;
645
645
646 Some(Ok(child_node))
646 Some(Ok(child_node))
647 } else {
647 } else {
648 // Reached the bottom of the stack, we’re done
648 // Reached the bottom of the stack, we’re done
649 None
649 None
650 }
650 }
651 })
651 })
652 }
652 }
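The comment block above contrasts the internal-iteration version with the external iterator that is actually implemented. As a hedged, standalone sketch of the same pseudo-recursion (an explicit stack standing in for the call stack, yielding children before their parent), over a toy tree type with illustrative names:

```
struct TreeNode {
    name: &'static str,
    children: Vec<TreeNode>,
}

// Depth-first, post-order external iteration driven by an explicit stack,
// mirroring the shape of `iter_nodes` above.
fn iter_depth_first<'a>(
    roots: &'a [TreeNode],
) -> impl Iterator<Item = &'a TreeNode> + 'a {
    let mut stack = Vec::new();
    let mut iter = roots.iter();
    std::iter::from_fn(move || loop {
        if let Some(child) = iter.next() {
            // Pseudo-recursion: descend into `child`, remembering where we
            // were among its siblings.
            let old_iter = std::mem::replace(&mut iter, child.children.iter());
            stack.push((child, old_iter));
        } else if let Some((node, sibling_iter)) = stack.pop() {
            // "Return" from pseudo-recursion: the subtree of `node` is
            // exhausted, so yield it and resume its siblings.
            iter = sibling_iter;
            return Some(node);
        } else {
            return None; // bottom of the stack reached, iteration is done
        }
    })
}

fn main() {
    let roots = vec![TreeNode {
        name: "a",
        children: vec![
            TreeNode { name: "b", children: vec![] },
            TreeNode { name: "c", children: vec![] },
        ],
    }];
    let names: Vec<&str> = iter_depth_first(&roots).map(|n| n.name).collect();
    assert_eq!(names, ["b", "c", "a"]); // children before their parent
}
```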
653
653
654 fn clear_known_ambiguous_mtimes(
654 fn clear_known_ambiguous_mtimes(
655 &mut self,
655 &mut self,
656 paths: &[impl AsRef<HgPath>],
656 paths: &[impl AsRef<HgPath>],
657 ) -> Result<(), DirstateV2ParseError> {
657 ) -> Result<(), DirstateV2ParseError> {
658 for path in paths {
658 for path in paths {
659 if let Some(node) = Self::get_node_mut(
659 if let Some(node) = Self::get_node_mut(
660 self.on_disk,
660 self.on_disk,
661 &mut self.root,
661 &mut self.root,
662 path.as_ref(),
662 path.as_ref(),
663 )? {
663 )? {
664 if let NodeData::Entry(entry) = &mut node.data {
664 if let NodeData::Entry(entry) = &mut node.data {
665 entry.clear_mtime();
665 entry.clear_mtime();
666 }
666 }
667 }
667 }
668 }
668 }
669 Ok(())
669 Ok(())
670 }
670 }
671
671
672 /// Return a fallible iterator of full paths of nodes that have an
672 /// Return a fallible iterator of full paths of nodes that have an
673 /// `entry` for which the given `predicate` returns true.
673 /// `entry` for which the given `predicate` returns true.
674 ///
674 ///
675 /// Fallibility means that each iterator item is a `Result`, which may
675 /// Fallibility means that each iterator item is a `Result`, which may
676 /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
676 /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
677 /// should only happen if Mercurial is buggy or a repository is corrupted.
677 /// should only happen if Mercurial is buggy or a repository is corrupted.
678 fn filter_full_paths<'tree>(
678 fn filter_full_paths<'tree>(
679 &'tree self,
679 &'tree self,
680 predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
680 predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
681 ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
681 ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
682 {
682 {
683 filter_map_results(self.iter_nodes(), move |node| {
683 filter_map_results(self.iter_nodes(), move |node| {
684 if let Some(entry) = node.entry()? {
684 if let Some(entry) = node.entry()? {
685 if predicate(&entry) {
685 if predicate(&entry) {
686 return Ok(Some(node.full_path(self.on_disk)?));
686 return Ok(Some(node.full_path(self.on_disk)?));
687 }
687 }
688 }
688 }
689 Ok(None)
689 Ok(None)
690 })
690 })
691 }
691 }
692 }
692 }
693
693
694 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
694 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
695 ///
695 ///
696 /// The callback is only called for incoming `Ok` values. Errors are passed
696 /// The callback is only called for incoming `Ok` values. Errors are passed
697 /// through as-is. In order to let it use the `?` operator the callback is
697 /// through as-is. In order to let it use the `?` operator the callback is
698 /// expected to return a `Result` of `Option`, instead of an `Option` of
698 /// expected to return a `Result` of `Option`, instead of an `Option` of
699 /// `Result`.
699 /// `Result`.
700 fn filter_map_results<'a, I, F, A, B, E>(
700 fn filter_map_results<'a, I, F, A, B, E>(
701 iter: I,
701 iter: I,
702 f: F,
702 f: F,
703 ) -> impl Iterator<Item = Result<B, E>> + 'a
703 ) -> impl Iterator<Item = Result<B, E>> + 'a
704 where
704 where
705 I: Iterator<Item = Result<A, E>> + 'a,
705 I: Iterator<Item = Result<A, E>> + 'a,
706 F: Fn(A) -> Result<Option<B>, E> + 'a,
706 F: Fn(A) -> Result<Option<B>, E> + 'a,
707 {
707 {
708 iter.filter_map(move |result| match result {
708 iter.filter_map(move |result| match result {
709 Ok(node) => f(node).transpose(),
709 Ok(node) => f(node).transpose(),
710 Err(e) => Some(Err(e)),
710 Err(e) => Some(Err(e)),
711 })
711 })
712 }
712 }
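A usage note (not part of this change): because the callback returns `Result<Option<_>, _>`, it can use the `?` operator internally, and `Err` items from the source iterator flow through untouched. A hedged sketch, assuming the `filter_map_results` helper above is in scope and using a made-up error type:

```
#[derive(Debug, PartialEq)]
struct ParseError;

// Keep only even numbers; parsing failures become errors via `?`.
fn keep_even(s: &str) -> Result<Option<i32>, ParseError> {
    let n: i32 = s.parse().map_err(|_| ParseError)?;
    Ok(if n % 2 == 0 { Some(n) } else { None })
}

fn main() {
    let input: Vec<Result<&str, ParseError>> =
        vec![Ok("2"), Ok("3"), Err(ParseError), Ok("40")];
    let output: Vec<Result<i32, ParseError>> =
        filter_map_results(input.into_iter(), keep_even).collect();
    // "3" is filtered out, the error is passed through as-is.
    assert_eq!(output, vec![Ok(2), Err(ParseError), Ok(40)]);
}
```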
713
713
714 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
714 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
715 fn clear(&mut self) {
715 fn clear(&mut self) {
716 self.root = Default::default();
716 self.root = Default::default();
717 self.nodes_with_entry_count = 0;
717 self.nodes_with_entry_count = 0;
718 self.nodes_with_copy_source_count = 0;
718 self.nodes_with_copy_source_count = 0;
719 }
719 }
720
720
721 fn add_file(
721 fn add_file(
722 &mut self,
722 &mut self,
723 filename: &HgPath,
723 filename: &HgPath,
724 entry: DirstateEntry,
724 entry: DirstateEntry,
725 added: bool,
725 added: bool,
726 merged: bool,
726 from_p2: bool,
727 from_p2: bool,
727 possibly_dirty: bool,
728 possibly_dirty: bool,
728 ) -> Result<(), DirstateError> {
729 ) -> Result<(), DirstateError> {
729 let mut entry = entry;
730 let mut entry = entry;
730 if added {
731 if added {
731 assert!(!possibly_dirty);
732 assert!(!possibly_dirty);
732 assert!(!from_p2);
733 assert!(!from_p2);
733 entry.state = EntryState::Added;
734 entry.state = EntryState::Added;
734 entry.size = SIZE_NON_NORMAL;
735 entry.size = SIZE_NON_NORMAL;
735 entry.mtime = MTIME_UNSET;
736 entry.mtime = MTIME_UNSET;
737 } else if merged {
738 assert!(!possibly_dirty);
739 assert!(!from_p2);
740 entry.state = EntryState::Merged;
741 entry.size = SIZE_FROM_OTHER_PARENT;
742 entry.mtime = MTIME_UNSET;
736 } else if from_p2 {
743 } else if from_p2 {
737 assert!(!possibly_dirty);
744 assert!(!possibly_dirty);
738 entry.size = SIZE_FROM_OTHER_PARENT;
745 entry.size = SIZE_FROM_OTHER_PARENT;
739 entry.mtime = MTIME_UNSET;
746 entry.mtime = MTIME_UNSET;
740 } else if possibly_dirty {
747 } else if possibly_dirty {
741 entry.size = SIZE_NON_NORMAL;
748 entry.size = SIZE_NON_NORMAL;
742 entry.mtime = MTIME_UNSET;
749 entry.mtime = MTIME_UNSET;
743 } else {
750 } else {
744 entry.size = entry.size & V1_RANGEMASK;
751 entry.size = entry.size & V1_RANGEMASK;
745 entry.mtime = entry.mtime & V1_RANGEMASK;
752 entry.mtime = entry.mtime & V1_RANGEMASK;
746 }
753 }
747
754
748 let old_state = match self.get(filename)? {
755 let old_state = match self.get(filename)? {
749 Some(e) => e.state,
756 Some(e) => e.state,
750 None => EntryState::Unknown,
757 None => EntryState::Unknown,
751 };
758 };
752
759
753 Ok(self.add_or_remove_file(filename, old_state, entry)?)
760 Ok(self.add_or_remove_file(filename, old_state, entry)?)
754 }
761 }
755
762
756 fn remove_file(
763 fn remove_file(
757 &mut self,
764 &mut self,
758 filename: &HgPath,
765 filename: &HgPath,
759 in_merge: bool,
766 in_merge: bool,
760 ) -> Result<(), DirstateError> {
767 ) -> Result<(), DirstateError> {
761 let old_entry_opt = self.get(filename)?;
768 let old_entry_opt = self.get(filename)?;
762 let old_state = match old_entry_opt {
769 let old_state = match old_entry_opt {
763 Some(e) => e.state,
770 Some(e) => e.state,
764 None => EntryState::Unknown,
771 None => EntryState::Unknown,
765 };
772 };
766 let mut size = 0;
773 let mut size = 0;
767 if in_merge {
774 if in_merge {
768 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
775 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
769 // during a merge. So I (marmoute) am not sure we need the
776 // during a merge. So I (marmoute) am not sure we need the
770 // conditional at all. Double-checking this with an assert
777 // conditional at all. Double-checking this with an assert
771 // would be nice.
778 // would be nice.
772 if let Some(old_entry) = old_entry_opt {
779 if let Some(old_entry) = old_entry_opt {
773 // backup the previous state
780 // backup the previous state
774 if old_entry.state == EntryState::Merged {
781 if old_entry.state == EntryState::Merged {
775 size = SIZE_NON_NORMAL;
782 size = SIZE_NON_NORMAL;
776 } else if old_entry.state == EntryState::Normal
783 } else if old_entry.state == EntryState::Normal
777 && old_entry.size == SIZE_FROM_OTHER_PARENT
784 && old_entry.size == SIZE_FROM_OTHER_PARENT
778 {
785 {
779 // other parent
786 // other parent
780 size = SIZE_FROM_OTHER_PARENT;
787 size = SIZE_FROM_OTHER_PARENT;
781 }
788 }
782 }
789 }
783 }
790 }
784 if size == 0 {
791 if size == 0 {
785 self.copy_map_remove(filename)?;
792 self.copy_map_remove(filename)?;
786 }
793 }
787 let entry = DirstateEntry {
794 let entry = DirstateEntry {
788 state: EntryState::Removed,
795 state: EntryState::Removed,
789 mode: 0,
796 mode: 0,
790 size,
797 size,
791 mtime: 0,
798 mtime: 0,
792 };
799 };
793 Ok(self.add_or_remove_file(filename, old_state, entry)?)
800 Ok(self.add_or_remove_file(filename, old_state, entry)?)
794 }
801 }
795
802
796 fn drop_file(
803 fn drop_file(
797 &mut self,
804 &mut self,
798 filename: &HgPath,
805 filename: &HgPath,
799 old_state: EntryState,
806 old_state: EntryState,
800 ) -> Result<bool, DirstateError> {
807 ) -> Result<bool, DirstateError> {
801 struct Dropped {
808 struct Dropped {
802 was_tracked: bool,
809 was_tracked: bool,
803 had_entry: bool,
810 had_entry: bool,
804 had_copy_source: bool,
811 had_copy_source: bool,
805 }
812 }
806
813
807 /// If this returns `Ok(Some((dropped, removed)))`, then
814 /// If this returns `Ok(Some((dropped, removed)))`, then
808 ///
815 ///
809 /// * `dropped` is about the leaf node that was at `filename`
816 /// * `dropped` is about the leaf node that was at `filename`
810 /// * `removed` is whether this particular level of recursion just
817 /// * `removed` is whether this particular level of recursion just
811 /// removed a node in `nodes`.
818 /// removed a node in `nodes`.
812 fn recur<'on_disk>(
819 fn recur<'on_disk>(
813 on_disk: &'on_disk [u8],
820 on_disk: &'on_disk [u8],
814 nodes: &mut ChildNodes<'on_disk>,
821 nodes: &mut ChildNodes<'on_disk>,
815 path: &HgPath,
822 path: &HgPath,
816 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
823 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
817 let (first_path_component, rest_of_path) =
824 let (first_path_component, rest_of_path) =
818 path.split_first_component();
825 path.split_first_component();
819 let node = if let Some(node) =
826 let node = if let Some(node) =
820 nodes.make_mut(on_disk)?.get_mut(first_path_component)
827 nodes.make_mut(on_disk)?.get_mut(first_path_component)
821 {
828 {
822 node
829 node
823 } else {
830 } else {
824 return Ok(None);
831 return Ok(None);
825 };
832 };
826 let dropped;
833 let dropped;
827 if let Some(rest) = rest_of_path {
834 if let Some(rest) = rest_of_path {
828 if let Some((d, removed)) =
835 if let Some((d, removed)) =
829 recur(on_disk, &mut node.children, rest)?
836 recur(on_disk, &mut node.children, rest)?
830 {
837 {
831 dropped = d;
838 dropped = d;
832 if dropped.had_entry {
839 if dropped.had_entry {
833 node.descendants_with_entry_count -= 1;
840 node.descendants_with_entry_count -= 1;
834 }
841 }
835 if dropped.was_tracked {
842 if dropped.was_tracked {
836 node.tracked_descendants_count -= 1;
843 node.tracked_descendants_count -= 1;
837 }
844 }
838
845
839 // Directory caches must be invalidated when removing a
846 // Directory caches must be invalidated when removing a
840 // child node
847 // child node
841 if removed {
848 if removed {
842 if let NodeData::CachedDirectory { .. } = &node.data {
849 if let NodeData::CachedDirectory { .. } = &node.data {
843 node.data = NodeData::None
850 node.data = NodeData::None
844 }
851 }
845 }
852 }
846 } else {
853 } else {
847 return Ok(None);
854 return Ok(None);
848 }
855 }
849 } else {
856 } else {
850 let had_entry = node.data.has_entry();
857 let had_entry = node.data.has_entry();
851 if had_entry {
858 if had_entry {
852 node.data = NodeData::None
859 node.data = NodeData::None
853 }
860 }
854 dropped = Dropped {
861 dropped = Dropped {
855 was_tracked: node
862 was_tracked: node
856 .data
863 .data
857 .as_entry()
864 .as_entry()
858 .map_or(false, |entry| entry.state.is_tracked()),
865 .map_or(false, |entry| entry.state.is_tracked()),
859 had_entry,
866 had_entry,
860 had_copy_source: node.copy_source.take().is_some(),
867 had_copy_source: node.copy_source.take().is_some(),
861 };
868 };
862 }
869 }
863 // After recursion, for both leaf (rest_of_path is None) nodes and
870 // After recursion, for both leaf (rest_of_path is None) nodes and
864 // parent nodes, remove a node if it just became empty.
871 // parent nodes, remove a node if it just became empty.
865 let remove = !node.data.has_entry()
872 let remove = !node.data.has_entry()
866 && node.copy_source.is_none()
873 && node.copy_source.is_none()
867 && node.children.is_empty();
874 && node.children.is_empty();
868 if remove {
875 if remove {
869 nodes.make_mut(on_disk)?.remove(first_path_component);
876 nodes.make_mut(on_disk)?.remove(first_path_component);
870 }
877 }
871 Ok(Some((dropped, remove)))
878 Ok(Some((dropped, remove)))
872 }
879 }
873
880
874 if let Some((dropped, _removed)) =
881 if let Some((dropped, _removed)) =
875 recur(self.on_disk, &mut self.root, filename)?
882 recur(self.on_disk, &mut self.root, filename)?
876 {
883 {
877 if dropped.had_entry {
884 if dropped.had_entry {
878 self.nodes_with_entry_count -= 1
885 self.nodes_with_entry_count -= 1
879 }
886 }
880 if dropped.had_copy_source {
887 if dropped.had_copy_source {
881 self.nodes_with_copy_source_count -= 1
888 self.nodes_with_copy_source_count -= 1
882 }
889 }
883 Ok(dropped.had_entry)
890 Ok(dropped.had_entry)
884 } else {
891 } else {
885 debug_assert!(!old_state.is_tracked());
892 debug_assert!(!old_state.is_tracked());
886 Ok(false)
893 Ok(false)
887 }
894 }
888 }
895 }
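The `recur` helper above prunes the tree bottom-up: once a node carries no entry, no copy source and no children, it is removed from its parent's `nodes`. A simplified sketch of that pruning rule on a toy nested map (slash-separated paths, illustrative types; the real code also maintains descendant counters and cached directory mtimes):

```
use std::collections::HashMap;

struct DirNode {
    has_entry: bool,
    children: HashMap<String, DirNode>,
}

// Drop the entry at `path` and prune nodes that became empty on the way
// back up. Returns whether an entry was actually dropped.
fn drop_entry(nodes: &mut HashMap<String, DirNode>, path: &str) -> bool {
    let (first, rest) = match path.split_once('/') {
        Some((first, rest)) => (first, Some(rest)),
        None => (path, None),
    };
    let node = match nodes.get_mut(first) {
        Some(node) => node,
        None => return false,
    };
    let dropped = match rest {
        Some(rest) => drop_entry(&mut node.children, rest),
        None => std::mem::replace(&mut node.has_entry, false),
    };
    // Same invariant as above: remove the node once it carries no data and
    // no children.
    let prune = !node.has_entry && node.children.is_empty();
    if prune {
        nodes.remove(first);
    }
    dropped
}
```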
889
896
890 fn clear_ambiguous_times(
897 fn clear_ambiguous_times(
891 &mut self,
898 &mut self,
892 filenames: Vec<HgPathBuf>,
899 filenames: Vec<HgPathBuf>,
893 now: i32,
900 now: i32,
894 ) -> Result<(), DirstateV2ParseError> {
901 ) -> Result<(), DirstateV2ParseError> {
895 for filename in filenames {
902 for filename in filenames {
896 if let Some(node) =
903 if let Some(node) =
897 Self::get_node_mut(self.on_disk, &mut self.root, &filename)?
904 Self::get_node_mut(self.on_disk, &mut self.root, &filename)?
898 {
905 {
899 if let NodeData::Entry(entry) = &mut node.data {
906 if let NodeData::Entry(entry) = &mut node.data {
900 entry.clear_ambiguous_mtime(now);
907 entry.clear_ambiguous_mtime(now);
901 }
908 }
902 }
909 }
903 }
910 }
904 Ok(())
911 Ok(())
905 }
912 }
906
913
907 fn non_normal_entries_contains(
914 fn non_normal_entries_contains(
908 &mut self,
915 &mut self,
909 key: &HgPath,
916 key: &HgPath,
910 ) -> Result<bool, DirstateV2ParseError> {
917 ) -> Result<bool, DirstateV2ParseError> {
911 Ok(if let Some(node) = self.get_node(key)? {
918 Ok(if let Some(node) = self.get_node(key)? {
912 node.entry()?.map_or(false, |entry| entry.is_non_normal())
919 node.entry()?.map_or(false, |entry| entry.is_non_normal())
913 } else {
920 } else {
914 false
921 false
915 })
922 })
916 }
923 }
917
924
918 fn non_normal_entries_remove(&mut self, _key: &HgPath) {
925 fn non_normal_entries_remove(&mut self, _key: &HgPath) {
919 // Do nothing, this `DirstateMap` does not have a separate "non normal
926 // Do nothing, this `DirstateMap` does not have a separate "non normal
920 // entries" set that needs to be kept up to date
927 // entries" set that needs to be kept up to date
921 }
928 }
922
929
923 fn non_normal_or_other_parent_paths(
930 fn non_normal_or_other_parent_paths(
924 &mut self,
931 &mut self,
925 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
932 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
926 {
933 {
927 Box::new(self.filter_full_paths(|entry| {
934 Box::new(self.filter_full_paths(|entry| {
928 entry.is_non_normal() || entry.is_from_other_parent()
935 entry.is_non_normal() || entry.is_from_other_parent()
929 }))
936 }))
930 }
937 }
931
938
932 fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
939 fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
933 // Do nothing, this `DirstateMap` does not have a separate "non normal
940 // Do nothing, this `DirstateMap` does not have a separate "non normal
934 // entries" and "from other parent" sets that need to be recomputed
941 // entries" and "from other parent" sets that need to be recomputed
935 }
942 }
936
943
937 fn iter_non_normal_paths(
944 fn iter_non_normal_paths(
938 &mut self,
945 &mut self,
939 ) -> Box<
946 ) -> Box<
940 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
947 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
941 > {
948 > {
942 self.iter_non_normal_paths_panic()
949 self.iter_non_normal_paths_panic()
943 }
950 }
944
951
945 fn iter_non_normal_paths_panic(
952 fn iter_non_normal_paths_panic(
946 &self,
953 &self,
947 ) -> Box<
954 ) -> Box<
948 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
955 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
949 > {
956 > {
950 Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
957 Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
951 }
958 }
952
959
953 fn iter_other_parent_paths(
960 fn iter_other_parent_paths(
954 &mut self,
961 &mut self,
955 ) -> Box<
962 ) -> Box<
956 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
963 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
957 > {
964 > {
958 Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
965 Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
959 }
966 }
960
967
961 fn has_tracked_dir(
968 fn has_tracked_dir(
962 &mut self,
969 &mut self,
963 directory: &HgPath,
970 directory: &HgPath,
964 ) -> Result<bool, DirstateError> {
971 ) -> Result<bool, DirstateError> {
965 if let Some(node) = self.get_node(directory)? {
972 if let Some(node) = self.get_node(directory)? {
966 // A node without a `DirstateEntry` was created to hold child
973 // A node without a `DirstateEntry` was created to hold child
967 // nodes, and is therefore a directory.
974 // nodes, and is therefore a directory.
968 let state = node.state()?;
975 let state = node.state()?;
969 Ok(state.is_none() && node.tracked_descendants_count() > 0)
976 Ok(state.is_none() && node.tracked_descendants_count() > 0)
970 } else {
977 } else {
971 Ok(false)
978 Ok(false)
972 }
979 }
973 }
980 }
974
981
975 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
982 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
976 if let Some(node) = self.get_node(directory)? {
983 if let Some(node) = self.get_node(directory)? {
977 // A node without a `DirstateEntry` was created to hold child
984 // A node without a `DirstateEntry` was created to hold child
978 // nodes, and is therefore a directory.
985 // nodes, and is therefore a directory.
979 let state = node.state()?;
986 let state = node.state()?;
980 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
987 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
981 } else {
988 } else {
982 Ok(false)
989 Ok(false)
983 }
990 }
984 }
991 }
985
992
986 #[timed]
993 #[timed]
987 fn pack_v1(
994 fn pack_v1(
988 &mut self,
995 &mut self,
989 parents: DirstateParents,
996 parents: DirstateParents,
990 now: Timestamp,
997 now: Timestamp,
991 ) -> Result<Vec<u8>, DirstateError> {
998 ) -> Result<Vec<u8>, DirstateError> {
992 let now: i32 = now.0.try_into().expect("time overflow");
999 let now: i32 = now.0.try_into().expect("time overflow");
993 let mut ambiguous_mtimes = Vec::new();
1000 let mut ambiguous_mtimes = Vec::new();
994 // Optimization (to be measured?): pre-compute size to avoid `Vec`
1001 // Optimization (to be measured?): pre-compute size to avoid `Vec`
995 // reallocations
1002 // reallocations
996 let mut size = parents.as_bytes().len();
1003 let mut size = parents.as_bytes().len();
997 for node in self.iter_nodes() {
1004 for node in self.iter_nodes() {
998 let node = node?;
1005 let node = node?;
999 if let Some(entry) = node.entry()? {
1006 if let Some(entry) = node.entry()? {
1000 size += packed_entry_size(
1007 size += packed_entry_size(
1001 node.full_path(self.on_disk)?,
1008 node.full_path(self.on_disk)?,
1002 node.copy_source(self.on_disk)?,
1009 node.copy_source(self.on_disk)?,
1003 );
1010 );
1004 if entry.mtime_is_ambiguous(now) {
1011 if entry.mtime_is_ambiguous(now) {
1005 ambiguous_mtimes.push(
1012 ambiguous_mtimes.push(
1006 node.full_path_borrowed(self.on_disk)?
1013 node.full_path_borrowed(self.on_disk)?
1007 .detach_from_tree(),
1014 .detach_from_tree(),
1008 )
1015 )
1009 }
1016 }
1010 }
1017 }
1011 }
1018 }
1012 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
1019 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
1013
1020
1014 let mut packed = Vec::with_capacity(size);
1021 let mut packed = Vec::with_capacity(size);
1015 packed.extend(parents.as_bytes());
1022 packed.extend(parents.as_bytes());
1016
1023
1017 for node in self.iter_nodes() {
1024 for node in self.iter_nodes() {
1018 let node = node?;
1025 let node = node?;
1019 if let Some(entry) = node.entry()? {
1026 if let Some(entry) = node.entry()? {
1020 pack_entry(
1027 pack_entry(
1021 node.full_path(self.on_disk)?,
1028 node.full_path(self.on_disk)?,
1022 &entry,
1029 &entry,
1023 node.copy_source(self.on_disk)?,
1030 node.copy_source(self.on_disk)?,
1024 &mut packed,
1031 &mut packed,
1025 );
1032 );
1026 }
1033 }
1027 }
1034 }
1028 Ok(packed)
1035 Ok(packed)
1029 }
1036 }
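The "pre-compute size" note above is the usual two-pass serialization pattern: measure everything first, then allocate once so the packing loop never reallocates. In isolation (illustrative names, byte slices standing in for packed entries):

```
// Two-pass packing: a sizing pass followed by a single allocation.
fn pack(records: &[&[u8]]) -> Vec<u8> {
    let size: usize = records.iter().map(|r| r.len()).sum();
    let mut packed = Vec::with_capacity(size);
    for record in records {
        packed.extend_from_slice(record);
    }
    debug_assert_eq!(packed.len(), size); // no reallocation was needed
    packed
}
```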
1030
1037
1031 #[timed]
1038 #[timed]
1032 fn pack_v2(
1039 fn pack_v2(
1033 &mut self,
1040 &mut self,
1034 parents: DirstateParents,
1041 parents: DirstateParents,
1035 now: Timestamp,
1042 now: Timestamp,
1036 ) -> Result<Vec<u8>, DirstateError> {
1043 ) -> Result<Vec<u8>, DirstateError> {
1037 // TODO: how do we want to handle this in 2038?
1044 // TODO: how do we want to handle this in 2038?
1038 let now: i32 = now.0.try_into().expect("time overflow");
1045 let now: i32 = now.0.try_into().expect("time overflow");
1039 let mut paths = Vec::new();
1046 let mut paths = Vec::new();
1040 for node in self.iter_nodes() {
1047 for node in self.iter_nodes() {
1041 let node = node?;
1048 let node = node?;
1042 if let Some(entry) = node.entry()? {
1049 if let Some(entry) = node.entry()? {
1043 if entry.mtime_is_ambiguous(now) {
1050 if entry.mtime_is_ambiguous(now) {
1044 paths.push(
1051 paths.push(
1045 node.full_path_borrowed(self.on_disk)?
1052 node.full_path_borrowed(self.on_disk)?
1046 .detach_from_tree(),
1053 .detach_from_tree(),
1047 )
1054 )
1048 }
1055 }
1049 }
1056 }
1050 }
1057 }
1051 // Borrow of `self` ends here since we collect cloned paths
1058 // Borrow of `self` ends here since we collect cloned paths
1052
1059
1053 self.clear_known_ambiguous_mtimes(&paths)?;
1060 self.clear_known_ambiguous_mtimes(&paths)?;
1054
1061
1055 on_disk::write(self, parents)
1062 on_disk::write(self, parents)
1056 }
1063 }
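The "borrow of `self` ends here" comment marks a deliberate pattern: the paths are cloned out of the tree (`detach_from_tree`) so the shared borrow taken by `iter_nodes` is over before the `&mut self` call to `clear_known_ambiguous_mtimes`. A reduced sketch of the same move, with made-up types:

```
struct Store {
    names: Vec<String>,
    cleared: usize,
}

impl Store {
    fn long_names(&self) -> impl Iterator<Item = &String> + '_ {
        self.names.iter().filter(|n| n.len() > 3)
    }

    fn clear(&mut self, names: &[String]) {
        self.cleared += names.len();
    }

    fn clear_long(&mut self) {
        // Collecting owned clones ends the shared borrow taken by
        // `long_names`, which makes the `&mut self` call below legal.
        let owned: Vec<String> = self.long_names().cloned().collect();
        self.clear(&owned);
    }
}
```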
1057
1064
1058 fn status<'a>(
1065 fn status<'a>(
1059 &'a mut self,
1066 &'a mut self,
1060 matcher: &'a (dyn Matcher + Sync),
1067 matcher: &'a (dyn Matcher + Sync),
1061 root_dir: PathBuf,
1068 root_dir: PathBuf,
1062 ignore_files: Vec<PathBuf>,
1069 ignore_files: Vec<PathBuf>,
1063 options: StatusOptions,
1070 options: StatusOptions,
1064 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1071 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1065 {
1072 {
1066 super::status::status(self, matcher, root_dir, ignore_files, options)
1073 super::status::status(self, matcher, root_dir, ignore_files, options)
1067 }
1074 }
1068
1075
1069 fn copy_map_len(&self) -> usize {
1076 fn copy_map_len(&self) -> usize {
1070 self.nodes_with_copy_source_count as usize
1077 self.nodes_with_copy_source_count as usize
1071 }
1078 }
1072
1079
1073 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1080 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1074 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1081 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1075 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1082 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1076 Some((node.full_path(self.on_disk)?, source))
1083 Some((node.full_path(self.on_disk)?, source))
1077 } else {
1084 } else {
1078 None
1085 None
1079 })
1086 })
1080 }))
1087 }))
1081 }
1088 }
1082
1089
1083 fn copy_map_contains_key(
1090 fn copy_map_contains_key(
1084 &self,
1091 &self,
1085 key: &HgPath,
1092 key: &HgPath,
1086 ) -> Result<bool, DirstateV2ParseError> {
1093 ) -> Result<bool, DirstateV2ParseError> {
1087 Ok(if let Some(node) = self.get_node(key)? {
1094 Ok(if let Some(node) = self.get_node(key)? {
1088 node.has_copy_source()
1095 node.has_copy_source()
1089 } else {
1096 } else {
1090 false
1097 false
1091 })
1098 })
1092 }
1099 }
1093
1100
1094 fn copy_map_get(
1101 fn copy_map_get(
1095 &self,
1102 &self,
1096 key: &HgPath,
1103 key: &HgPath,
1097 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1104 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1098 if let Some(node) = self.get_node(key)? {
1105 if let Some(node) = self.get_node(key)? {
1099 if let Some(source) = node.copy_source(self.on_disk)? {
1106 if let Some(source) = node.copy_source(self.on_disk)? {
1100 return Ok(Some(source));
1107 return Ok(Some(source));
1101 }
1108 }
1102 }
1109 }
1103 Ok(None)
1110 Ok(None)
1104 }
1111 }
1105
1112
1106 fn copy_map_remove(
1113 fn copy_map_remove(
1107 &mut self,
1114 &mut self,
1108 key: &HgPath,
1115 key: &HgPath,
1109 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1116 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1110 let count = &mut self.nodes_with_copy_source_count;
1117 let count = &mut self.nodes_with_copy_source_count;
1111 Ok(
1118 Ok(
1112 Self::get_node_mut(self.on_disk, &mut self.root, key)?.and_then(
1119 Self::get_node_mut(self.on_disk, &mut self.root, key)?.and_then(
1113 |node| {
1120 |node| {
1114 if node.copy_source.is_some() {
1121 if node.copy_source.is_some() {
1115 *count -= 1
1122 *count -= 1
1116 }
1123 }
1117 node.copy_source.take().map(Cow::into_owned)
1124 node.copy_source.take().map(Cow::into_owned)
1118 },
1125 },
1119 ),
1126 ),
1120 )
1127 )
1121 }
1128 }
1122
1129
1123 fn copy_map_insert(
1130 fn copy_map_insert(
1124 &mut self,
1131 &mut self,
1125 key: HgPathBuf,
1132 key: HgPathBuf,
1126 value: HgPathBuf,
1133 value: HgPathBuf,
1127 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1134 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1128 let node = Self::get_or_insert_node(
1135 let node = Self::get_or_insert_node(
1129 self.on_disk,
1136 self.on_disk,
1130 &mut self.root,
1137 &mut self.root,
1131 &key,
1138 &key,
1132 WithBasename::to_cow_owned,
1139 WithBasename::to_cow_owned,
1133 |_ancestor| {},
1140 |_ancestor| {},
1134 )?;
1141 )?;
1135 if node.copy_source.is_none() {
1142 if node.copy_source.is_none() {
1136 self.nodes_with_copy_source_count += 1
1143 self.nodes_with_copy_source_count += 1
1137 }
1144 }
1138 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1145 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1139 }
1146 }
1140
1147
1141 fn len(&self) -> usize {
1148 fn len(&self) -> usize {
1142 self.nodes_with_entry_count as usize
1149 self.nodes_with_entry_count as usize
1143 }
1150 }
1144
1151
1145 fn contains_key(
1152 fn contains_key(
1146 &self,
1153 &self,
1147 key: &HgPath,
1154 key: &HgPath,
1148 ) -> Result<bool, DirstateV2ParseError> {
1155 ) -> Result<bool, DirstateV2ParseError> {
1149 Ok(self.get(key)?.is_some())
1156 Ok(self.get(key)?.is_some())
1150 }
1157 }
1151
1158
1152 fn get(
1159 fn get(
1153 &self,
1160 &self,
1154 key: &HgPath,
1161 key: &HgPath,
1155 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1162 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1156 Ok(if let Some(node) = self.get_node(key)? {
1163 Ok(if let Some(node) = self.get_node(key)? {
1157 node.entry()?
1164 node.entry()?
1158 } else {
1165 } else {
1159 None
1166 None
1160 })
1167 })
1161 }
1168 }
1162
1169
1163 fn iter(&self) -> StateMapIter<'_> {
1170 fn iter(&self) -> StateMapIter<'_> {
1164 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1171 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1165 Ok(if let Some(entry) = node.entry()? {
1172 Ok(if let Some(entry) = node.entry()? {
1166 Some((node.full_path(self.on_disk)?, entry))
1173 Some((node.full_path(self.on_disk)?, entry))
1167 } else {
1174 } else {
1168 None
1175 None
1169 })
1176 })
1170 }))
1177 }))
1171 }
1178 }
1172
1179
1173 fn iter_directories(
1180 fn iter_directories(
1174 &self,
1181 &self,
1175 ) -> Box<
1182 ) -> Box<
1176 dyn Iterator<
1183 dyn Iterator<
1177 Item = Result<
1184 Item = Result<
1178 (&HgPath, Option<Timestamp>),
1185 (&HgPath, Option<Timestamp>),
1179 DirstateV2ParseError,
1186 DirstateV2ParseError,
1180 >,
1187 >,
1181 > + Send
1188 > + Send
1182 + '_,
1189 + '_,
1183 > {
1190 > {
1184 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1191 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1185 Ok(if node.state()?.is_none() {
1192 Ok(if node.state()?.is_none() {
1186 Some((
1193 Some((
1187 node.full_path(self.on_disk)?,
1194 node.full_path(self.on_disk)?,
1188 node.cached_directory_mtime()
1195 node.cached_directory_mtime()
1189 .map(|mtime| Timestamp(mtime.seconds())),
1196 .map(|mtime| Timestamp(mtime.seconds())),
1190 ))
1197 ))
1191 } else {
1198 } else {
1192 None
1199 None
1193 })
1200 })
1194 }))
1201 }))
1195 }
1202 }
1196 }
1203 }
@@ -1,494 +1,496 b''
1 use std::path::PathBuf;
1 use std::path::PathBuf;
2
2
3 use crate::dirstate::parsers::Timestamp;
3 use crate::dirstate::parsers::Timestamp;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
5 use crate::matchers::Matcher;
5 use crate::matchers::Matcher;
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
7 use crate::CopyMapIter;
7 use crate::CopyMapIter;
8 use crate::DirstateEntry;
8 use crate::DirstateEntry;
9 use crate::DirstateError;
9 use crate::DirstateError;
10 use crate::DirstateMap;
10 use crate::DirstateMap;
11 use crate::DirstateParents;
11 use crate::DirstateParents;
12 use crate::DirstateStatus;
12 use crate::DirstateStatus;
13 use crate::EntryState;
13 use crate::EntryState;
14 use crate::PatternFileWarning;
14 use crate::PatternFileWarning;
15 use crate::StateMapIter;
15 use crate::StateMapIter;
16 use crate::StatusError;
16 use crate::StatusError;
17 use crate::StatusOptions;
17 use crate::StatusOptions;
18
18
19 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
19 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
20 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
20 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
21 /// a trait object of this trait. Except for constructors, this trait defines
21 /// a trait object of this trait. Except for constructors, this trait defines
22 /// all APIs that the class needs to interact with its inner dirstate map.
22 /// all APIs that the class needs to interact with its inner dirstate map.
23 ///
23 ///
24 /// A trait object is used to support two different concrete types:
24 /// A trait object is used to support two different concrete types:
25 ///
25 ///
26 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
26 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
27 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
27 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
28 /// fields.
28 /// fields.
29 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
29 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
30 /// dirstate map" based on a tree data structure with nodes for directories
30 /// dirstate map" based on a tree data structure with nodes for directories
31 /// containing child nodes for their files and sub-directories. This tree
31 /// containing child nodes for their files and sub-directories. This tree
32 /// enables a more efficient algorithm for `hg status`, but its details are
32 /// enables a more efficient algorithm for `hg status`, but its details are
33 /// abstracted in this trait.
33 /// abstracted in this trait.
34 ///
34 ///
35 /// The dirstate map associates paths of files in the working directory to
35 /// The dirstate map associates paths of files in the working directory to
36 /// various information about the state of those files.
36 /// various information about the state of those files.
37 pub trait DirstateMapMethods {
37 pub trait DirstateMapMethods {
38 /// Remove information about all files in this map
38 /// Remove information about all files in this map
39 fn clear(&mut self);
39 fn clear(&mut self);
40
40
41 /// Add or change the information associated to a given file.
41 /// Add or change the information associated to a given file.
42 ///
42 ///
43 /// `old_state` is the state in the entry that `get` would have returned
43 /// `old_state` is the state in the entry that `get` would have returned
44 /// before this call, or `EntryState::Unknown` if there was no such entry.
44 /// before this call, or `EntryState::Unknown` if there was no such entry.
45 ///
45 ///
46 /// `entry.state` should never be `EntryState::Unknown`.
46 /// `entry.state` should never be `EntryState::Unknown`.
47 fn add_file(
47 fn add_file(
48 &mut self,
48 &mut self,
49 filename: &HgPath,
49 filename: &HgPath,
50 entry: DirstateEntry,
50 entry: DirstateEntry,
51 added: bool,
51 added: bool,
52 merged: bool,
52 from_p2: bool,
53 from_p2: bool,
53 possibly_dirty: bool,
54 possibly_dirty: bool,
54 ) -> Result<(), DirstateError>;
55 ) -> Result<(), DirstateError>;
55
56
56 /// Mark a file as "removed" (as in `hg rm`).
57 /// Mark a file as "removed" (as in `hg rm`).
57 ///
58 ///
58 /// `old_state` is the state in the entry that `get` would have returned
59 /// `old_state` is the state in the entry that `get` would have returned
59 /// before this call, or `EntryState::Unknown` if there was no such entry.
60 /// before this call, or `EntryState::Unknown` if there was no such entry.
60 ///
61 ///
61 /// `size` is not actually a size but the 0 or -1 or -2 value that would be
62 /// `size` is not actually a size but the 0 or -1 or -2 value that would be
62 /// put in the size field in the dirstate-v1 format.
63 /// put in the size field in the dirstate-v1 format.
63 fn remove_file(
64 fn remove_file(
64 &mut self,
65 &mut self,
65 filename: &HgPath,
66 filename: &HgPath,
66 in_merge: bool,
67 in_merge: bool,
67 ) -> Result<(), DirstateError>;
68 ) -> Result<(), DirstateError>;
68
69
69 /// Drop information about this file from the map if any, and return
70 /// Drop information about this file from the map if any, and return
70 /// whether there was any.
71 /// whether there was any.
71 ///
72 ///
72 /// `get` will now return `None` for this filename.
73 /// `get` will now return `None` for this filename.
73 ///
74 ///
74 /// `old_state` is the state in the entry that `get` would have returned
75 /// `old_state` is the state in the entry that `get` would have returned
75 /// before this call, or `EntryState::Unknown` if there was no such entry.
76 /// before this call, or `EntryState::Unknown` if there was no such entry.
76 fn drop_file(
77 fn drop_file(
77 &mut self,
78 &mut self,
78 filename: &HgPath,
79 filename: &HgPath,
79 old_state: EntryState,
80 old_state: EntryState,
80 ) -> Result<bool, DirstateError>;
81 ) -> Result<bool, DirstateError>;
81
82
82 /// Among given files, mark the stored `mtime` as ambiguous if there is one
83 /// Among given files, mark the stored `mtime` as ambiguous if there is one
83 /// (if `state == EntryState::Normal`) equal to the given current Unix
84 /// (if `state == EntryState::Normal`) equal to the given current Unix
84 /// timestamp.
85 /// timestamp.
85 fn clear_ambiguous_times(
86 fn clear_ambiguous_times(
86 &mut self,
87 &mut self,
87 filenames: Vec<HgPathBuf>,
88 filenames: Vec<HgPathBuf>,
88 now: i32,
89 now: i32,
89 ) -> Result<(), DirstateV2ParseError>;
90 ) -> Result<(), DirstateV2ParseError>;
90
91
91 /// Return whether the map has a "non-normal" entry for the given
92 /// Return whether the map has a "non-normal" entry for the given
92 /// filename. That is, any entry with a `state` other than
93 /// filename. That is, any entry with a `state` other than
93 /// `EntryState::Normal` or with an ambiguous `mtime`.
94 /// `EntryState::Normal` or with an ambiguous `mtime`.
94 fn non_normal_entries_contains(
95 fn non_normal_entries_contains(
95 &mut self,
96 &mut self,
96 key: &HgPath,
97 key: &HgPath,
97 ) -> Result<bool, DirstateV2ParseError>;
98 ) -> Result<bool, DirstateV2ParseError>;
98
99
99 /// Mark the given path as a "normal" file. This is only relevant in the flat
100 /// Mark the given path as a "normal" file. This is only relevant in the flat
100 /// dirstate map where there is a separate `HashSet` that needs to be kept
101 /// dirstate map where there is a separate `HashSet` that needs to be kept
101 /// up to date.
102 /// up to date.
102 fn non_normal_entries_remove(&mut self, key: &HgPath);
103 fn non_normal_entries_remove(&mut self, key: &HgPath);
103
104
104 /// Return an iterator of paths whose respective entries are either
105 /// Return an iterator of paths whose respective entries are either
105 /// "non-normal" (see `non_normal_entries_contains`) or "from other
106 /// "non-normal" (see `non_normal_entries_contains`) or "from other
106 /// parent".
107 /// parent".
107 ///
108 ///
108 /// If that information is cached, create the cache as needed.
109 /// If that information is cached, create the cache as needed.
109 ///
110 ///
110 /// "From other parent" is defined as `state == Normal && size == -2`.
111 /// "From other parent" is defined as `state == Normal && size == -2`.
111 ///
112 ///
112 /// Because parse errors can happen during iteration, the iterated items
113 /// Because parse errors can happen during iteration, the iterated items
113 /// are `Result`s.
114 /// are `Result`s.
114 fn non_normal_or_other_parent_paths(
115 fn non_normal_or_other_parent_paths(
115 &mut self,
116 &mut self,
116 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
117 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
117
118
118 /// Create the cache for `non_normal_or_other_parent_paths` if needed.
119 /// Create the cache for `non_normal_or_other_parent_paths` if needed.
119 ///
120 ///
120 /// If `force` is true, the cache is re-created even if it already exists.
121 /// If `force` is true, the cache is re-created even if it already exists.
121 fn set_non_normal_other_parent_entries(&mut self, force: bool);
122 fn set_non_normal_other_parent_entries(&mut self, force: bool);
122
123
123 /// Return an iterator of paths whose respective entries are "non-normal"
124 /// Return an iterator of paths whose respective entries are "non-normal"
124 /// (see `non_normal_entries_contains`).
125 /// (see `non_normal_entries_contains`).
125 ///
126 ///
126 /// If that information is cached, create the cache as needed.
127 /// If that information is cached, create the cache as needed.
127 ///
128 ///
128 /// Because parse errors can happen during iteration, the iterated items
129 /// Because parse errors can happen during iteration, the iterated items
129 /// are `Result`s.
130 /// are `Result`s.
130 fn iter_non_normal_paths(
131 fn iter_non_normal_paths(
131 &mut self,
132 &mut self,
132 ) -> Box<
133 ) -> Box<
133 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
134 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
134 >;
135 >;
135
136
136 /// Same as `iter_non_normal_paths`, but takes `&self` instead of `&mut
137 /// Same as `iter_non_normal_paths`, but takes `&self` instead of `&mut
137 /// self`.
138 /// self`.
138 ///
139 ///
139 /// Panics if a cache is necessary but does not exist yet.
140 /// Panics if a cache is necessary but does not exist yet.
140 fn iter_non_normal_paths_panic(
141 fn iter_non_normal_paths_panic(
141 &self,
142 &self,
142 ) -> Box<
143 ) -> Box<
143 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
144 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
144 >;
145 >;
145
146
146 /// Return an iterator of paths whose respective entries are "from other
147 /// Return an iterator of paths whose respective entries are "from other
147 /// parent".
148 /// parent".
148 ///
149 ///
149 /// If that information is cached, create the cache as needed.
150 /// If that information is cached, create the cache as needed.
150 ///
151 ///
151 /// "From other parent" is defined as `state == Normal && size == -2`.
152 /// "From other parent" is defined as `state == Normal && size == -2`.
152 ///
153 ///
153 /// Because parse errors can happen during iteration, the iterated items
154 /// Because parse errors can happen during iteration, the iterated items
154 /// are `Result`s.
155 /// are `Result`s.
155 fn iter_other_parent_paths(
156 fn iter_other_parent_paths(
156 &mut self,
157 &mut self,
157 ) -> Box<
158 ) -> Box<
158 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
159 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
159 >;
160 >;
160
161
161 /// Returns whether the sub-tree rooted at the given directory contains any
162 /// Returns whether the sub-tree rooted at the given directory contains any
162 /// tracked file.
163 /// tracked file.
163 ///
164 ///
164 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
165 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
165 fn has_tracked_dir(
166 fn has_tracked_dir(
166 &mut self,
167 &mut self,
167 directory: &HgPath,
168 directory: &HgPath,
168 ) -> Result<bool, DirstateError>;
169 ) -> Result<bool, DirstateError>;
169
170
170 /// Returns whether the sub-tree rooted at the given directory contains any
171 /// Returns whether the sub-tree rooted at the given directory contains any
171 /// file with a dirstate entry.
172 /// file with a dirstate entry.
172 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
173 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
173
174
174 /// Clear mtimes that are ambiguous with `now` (similar to
175 /// Clear mtimes that are ambiguous with `now` (similar to
175 /// `clear_ambiguous_times` but for all files in the dirstate map), and
176 /// `clear_ambiguous_times` but for all files in the dirstate map), and
176 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v1
177 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v1
177 /// format.
178 /// format.
178 fn pack_v1(
179 fn pack_v1(
179 &mut self,
180 &mut self,
180 parents: DirstateParents,
181 parents: DirstateParents,
181 now: Timestamp,
182 now: Timestamp,
182 ) -> Result<Vec<u8>, DirstateError>;
183 ) -> Result<Vec<u8>, DirstateError>;
183
184
184 /// Clear mtimes that are ambiguous with `now` (similar to
185 /// Clear mtimes that are ambiguous with `now` (similar to
185 /// `clear_ambiguous_times` but for all files in the dirstate map), and
186 /// `clear_ambiguous_times` but for all files in the dirstate map), and
186 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v2
187 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v2
187 /// format.
188 /// format.
188 ///
189 ///
189 /// Note: this is only supported by the tree dirstate map.
190 /// Note: this is only supported by the tree dirstate map.
190 fn pack_v2(
191 fn pack_v2(
191 &mut self,
192 &mut self,
192 parents: DirstateParents,
193 parents: DirstateParents,
193 now: Timestamp,
194 now: Timestamp,
194 ) -> Result<Vec<u8>, DirstateError>;
195 ) -> Result<Vec<u8>, DirstateError>;
195
196
196 /// Run the status algorithm.
197 /// Run the status algorithm.
197 ///
198 ///
198 /// This is not semantically a method of the dirstate map, but a different
199 /// This is not semantically a method of the dirstate map, but a different
199 /// algorithm is used for the flat vs. tree dirstate map, so having it in
200 /// algorithm is used for the flat vs. tree dirstate map, so having it in
200 /// this trait enables the same dynamic dispatch as with other methods.
201 /// this trait enables the same dynamic dispatch as with other methods.
201 fn status<'a>(
202 fn status<'a>(
202 &'a mut self,
203 &'a mut self,
203 matcher: &'a (dyn Matcher + Sync),
204 matcher: &'a (dyn Matcher + Sync),
204 root_dir: PathBuf,
205 root_dir: PathBuf,
205 ignore_files: Vec<PathBuf>,
206 ignore_files: Vec<PathBuf>,
206 options: StatusOptions,
207 options: StatusOptions,
207 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
208 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
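A sketch of driving `status` through the trait object; `run_status` is a hypothetical wrapper and the argument types are exactly the ones declared above, so which concrete algorithm runs (flat or tree) is decided by the map behind the reference:

use std::path::PathBuf;

use hg::dirstate_tree::dispatch::DirstateMapMethods;
use hg::matchers::Matcher;
use hg::{DirstateStatus, PatternFileWarning, StatusError, StatusOptions};

// Forward to whichever status implementation the map provides.
fn run_status<'a>(
    map: &'a mut dyn DirstateMapMethods,
    matcher: &'a (dyn Matcher + Sync),
    root_dir: PathBuf,
    ignore_files: Vec<PathBuf>,
    options: StatusOptions,
) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError> {
    map.status(matcher, root_dir, ignore_files, options)
}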
208
209
209 /// Returns how many files in the dirstate map have a recorded copy source.
210 /// Returns how many files in the dirstate map have a recorded copy source.
210 fn copy_map_len(&self) -> usize;
211 fn copy_map_len(&self) -> usize;
211
212
212 /// Returns an iterator of `(path, copy_source)` for all files that have a
213 /// Returns an iterator of `(path, copy_source)` for all files that have a
213 /// copy source.
214 /// copy source.
214 fn copy_map_iter(&self) -> CopyMapIter<'_>;
215 fn copy_map_iter(&self) -> CopyMapIter<'_>;
215
216
216 /// Returns whether the given file has a copy source.
217 /// Returns whether the given file has a copy source.
217 fn copy_map_contains_key(
218 fn copy_map_contains_key(
218 &self,
219 &self,
219 key: &HgPath,
220 key: &HgPath,
220 ) -> Result<bool, DirstateV2ParseError>;
221 ) -> Result<bool, DirstateV2ParseError>;
221
222
222 /// Returns the copy source for the given file.
223 /// Returns the copy source for the given file.
223 fn copy_map_get(
224 fn copy_map_get(
224 &self,
225 &self,
225 key: &HgPath,
226 key: &HgPath,
226 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
227 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
227
228
228 /// Removes the recorded copy source if any for the given file, and returns
229 /// Removes the recorded copy source if any for the given file, and returns
229 /// it.
230 /// it.
230 fn copy_map_remove(
231 fn copy_map_remove(
231 &mut self,
232 &mut self,
232 key: &HgPath,
233 key: &HgPath,
233 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
234 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
234
235
235 /// Set the given `value` copy source for the given `key` file.
236 /// Set the given `value` copy source for the given `key` file.
236 fn copy_map_insert(
237 fn copy_map_insert(
237 &mut self,
238 &mut self,
238 key: HgPathBuf,
239 key: HgPathBuf,
239 value: HgPathBuf,
240 value: HgPathBuf,
240 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
241 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
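For orientation, a small sketch of the copy-map calls that copy-tracking code could end up making; `record_copy` and `copy_source_of` are made-up helpers, not something this patch adds:

use hg::dirstate_tree::dispatch::DirstateMapMethods;
use hg::dirstate_tree::on_disk::DirstateV2ParseError;
use hg::utils::hg_path::{HgPath, HgPathBuf};

// Record that `dest` was copied from `source`, returning any source
// previously recorded for `dest`.
fn record_copy(
    map: &mut dyn DirstateMapMethods,
    dest: HgPathBuf,
    source: HgPathBuf,
) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
    map.copy_map_insert(dest, source)
}

// Look the copy source back up, e.g. for `hg status -C` style output.
fn copy_source_of<'a>(
    map: &'a dyn DirstateMapMethods,
    dest: &HgPath,
) -> Result<Option<&'a HgPath>, DirstateV2ParseError> {
    map.copy_map_get(dest)
}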
241
242
242 /// Returns the number of files that have an entry.
243 /// Returns the number of files that have an entry.
243 fn len(&self) -> usize;
244 fn len(&self) -> usize;
244
245
245 /// Returns whether the given file has an entry.
246 /// Returns whether the given file has an entry.
246 fn contains_key(&self, key: &HgPath)
247 fn contains_key(&self, key: &HgPath)
247 -> Result<bool, DirstateV2ParseError>;
248 -> Result<bool, DirstateV2ParseError>;
248
249
249 /// Returns the entry, if any, for the given file.
250 /// Returns the entry, if any, for the given file.
250 fn get(
251 fn get(
251 &self,
252 &self,
252 key: &HgPath,
253 key: &HgPath,
253 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
254 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
254
255
255 /// Returns a `(path, entry)` iterator of files that have an entry.
256 /// Returns a `(path, entry)` iterator of files that have an entry.
256 ///
257 ///
257 /// Because parse errors can happen during iteration, the iterated items
258 /// Because parse errors can happen during iteration, the iterated items
258 /// are `Result`s.
259 /// are `Result`s.
259 fn iter(&self) -> StateMapIter<'_>;
260 fn iter(&self) -> StateMapIter<'_>;
260
261
261 /// In the tree dirstate, return an iterator of "directory" (entry-less)
262 /// In the tree dirstate, return an iterator of "directory" (entry-less)
262 /// nodes with the data stored for them. This is for `hg debugdirstate
263 /// nodes with the data stored for them. This is for `hg debugdirstate
263 /// --dirs`.
264 /// --dirs`.
264 ///
265 ///
265 /// In the flat dirstate, returns an empty iterator.
266 /// In the flat dirstate, returns an empty iterator.
266 ///
267 ///
267 /// Because parse errors can happen during iteration, the iterated items
268 /// Because parse errors can happen during iteration, the iterated items
268 /// are `Result`s.
269 /// are `Result`s.
269 fn iter_directories(
270 fn iter_directories(
270 &self,
271 &self,
271 ) -> Box<
272 ) -> Box<
272 dyn Iterator<
273 dyn Iterator<
273 Item = Result<
274 Item = Result<
274 (&HgPath, Option<Timestamp>),
275 (&HgPath, Option<Timestamp>),
275 DirstateV2ParseError,
276 DirstateV2ParseError,
276 >,
277 >,
277 > + Send
278 > + Send
278 + '_,
279 + '_,
279 >;
280 >;
280 }
281 }
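Everything in this trait is object-safe on purpose: the Python binding below stores a `Box<dyn DirstateMapMethods + Send>` and never needs to know which concrete map it holds. A short sketch of code written against the trait object (the helper name and the counting logic are illustrative only):

use hg::dirstate_tree::dispatch::DirstateMapMethods;
use hg::dirstate_tree::on_disk::DirstateV2ParseError;
use hg::EntryState;

// Count entries that are still tracked. Iteration yields `Result`s
// because a dirstate-v2 parse error can surface lazily, mid-iteration.
fn count_tracked(
    map: &dyn DirstateMapMethods,
) -> Result<usize, DirstateV2ParseError> {
    let mut tracked = 0;
    for item in map.iter() {
        let (_path, entry) = item?;
        if entry.state != EntryState::Removed {
            tracked += 1;
        }
    }
    Ok(tracked)
}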
281
282
282 impl DirstateMapMethods for DirstateMap {
283 impl DirstateMapMethods for DirstateMap {
283 fn clear(&mut self) {
284 fn clear(&mut self) {
284 self.clear()
285 self.clear()
285 }
286 }
286
287
287 fn add_file(
288 fn add_file(
288 &mut self,
289 &mut self,
289 filename: &HgPath,
290 filename: &HgPath,
290 entry: DirstateEntry,
291 entry: DirstateEntry,
291 added: bool,
292 added: bool,
293 merged: bool,
292 from_p2: bool,
294 from_p2: bool,
293 possibly_dirty: bool,
295 possibly_dirty: bool,
294 ) -> Result<(), DirstateError> {
296 ) -> Result<(), DirstateError> {
295 self.add_file(filename, entry, added, from_p2, possibly_dirty)
297 self.add_file(filename, entry, added, merged, from_p2, possibly_dirty)
296 }
298 }
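This is the heart of the changeset: `add_file` grows a `merged` flag between `added` and `from_p2`, and the flat map simply forwards it. Because the arguments are positional `bool`s, every call site must be updated in lockstep; a hypothetical call site (the flag values are made up for illustration):

use hg::dirstate_tree::dispatch::DirstateMapMethods;
use hg::utils::hg_path::HgPath;
use hg::{DirstateEntry, DirstateError};

// Record a file whose content was taken from a merge of both parents.
fn record_merged_file(
    map: &mut dyn DirstateMapMethods,
    filename: &HgPath,
    entry: DirstateEntry,
) -> Result<(), DirstateError> {
    map.add_file(
        filename,
        entry,
        false, // added: the file already existed in p1
        true,  // merged: the new flag introduced by this changeset
        false, // from_p2: not coming only from the second parent
        false, // possibly_dirty: trust the entry's size/mtime
    )
}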
297
299
298 fn remove_file(
300 fn remove_file(
299 &mut self,
301 &mut self,
300 filename: &HgPath,
302 filename: &HgPath,
301 in_merge: bool,
303 in_merge: bool,
302 ) -> Result<(), DirstateError> {
304 ) -> Result<(), DirstateError> {
303 self.remove_file(filename, in_merge)
305 self.remove_file(filename, in_merge)
304 }
306 }
305
307
306 fn drop_file(
308 fn drop_file(
307 &mut self,
309 &mut self,
308 filename: &HgPath,
310 filename: &HgPath,
309 old_state: EntryState,
311 old_state: EntryState,
310 ) -> Result<bool, DirstateError> {
312 ) -> Result<bool, DirstateError> {
311 self.drop_file(filename, old_state)
313 self.drop_file(filename, old_state)
312 }
314 }
313
315
314 fn clear_ambiguous_times(
316 fn clear_ambiguous_times(
315 &mut self,
317 &mut self,
316 filenames: Vec<HgPathBuf>,
318 filenames: Vec<HgPathBuf>,
317 now: i32,
319 now: i32,
318 ) -> Result<(), DirstateV2ParseError> {
320 ) -> Result<(), DirstateV2ParseError> {
319 Ok(self.clear_ambiguous_times(filenames, now))
321 Ok(self.clear_ambiguous_times(filenames, now))
320 }
322 }
321
323
322 fn non_normal_entries_contains(
324 fn non_normal_entries_contains(
323 &mut self,
325 &mut self,
324 key: &HgPath,
326 key: &HgPath,
325 ) -> Result<bool, DirstateV2ParseError> {
327 ) -> Result<bool, DirstateV2ParseError> {
326 let (non_normal, _other_parent) =
328 let (non_normal, _other_parent) =
327 self.get_non_normal_other_parent_entries();
329 self.get_non_normal_other_parent_entries();
328 Ok(non_normal.contains(key))
330 Ok(non_normal.contains(key))
329 }
331 }
330
332
331 fn non_normal_entries_remove(&mut self, key: &HgPath) {
333 fn non_normal_entries_remove(&mut self, key: &HgPath) {
332 self.non_normal_entries_remove(key)
334 self.non_normal_entries_remove(key)
333 }
335 }
334
336
335 fn non_normal_or_other_parent_paths(
337 fn non_normal_or_other_parent_paths(
336 &mut self,
338 &mut self,
337 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
339 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
338 {
340 {
339 let (non_normal, other_parent) =
341 let (non_normal, other_parent) =
340 self.get_non_normal_other_parent_entries();
342 self.get_non_normal_other_parent_entries();
341 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
343 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
342 }
344 }
343
345
344 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
346 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
345 self.set_non_normal_other_parent_entries(force)
347 self.set_non_normal_other_parent_entries(force)
346 }
348 }
347
349
348 fn iter_non_normal_paths(
350 fn iter_non_normal_paths(
349 &mut self,
351 &mut self,
350 ) -> Box<
352 ) -> Box<
351 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
353 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
352 > {
354 > {
353 let (non_normal, _other_parent) =
355 let (non_normal, _other_parent) =
354 self.get_non_normal_other_parent_entries();
356 self.get_non_normal_other_parent_entries();
355 Box::new(non_normal.iter().map(|p| Ok(&**p)))
357 Box::new(non_normal.iter().map(|p| Ok(&**p)))
356 }
358 }
357
359
358 fn iter_non_normal_paths_panic(
360 fn iter_non_normal_paths_panic(
359 &self,
361 &self,
360 ) -> Box<
362 ) -> Box<
361 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
363 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
362 > {
364 > {
363 let (non_normal, _other_parent) =
365 let (non_normal, _other_parent) =
364 self.get_non_normal_other_parent_entries_panic();
366 self.get_non_normal_other_parent_entries_panic();
365 Box::new(non_normal.iter().map(|p| Ok(&**p)))
367 Box::new(non_normal.iter().map(|p| Ok(&**p)))
366 }
368 }
367
369
368 fn iter_other_parent_paths(
370 fn iter_other_parent_paths(
369 &mut self,
371 &mut self,
370 ) -> Box<
372 ) -> Box<
371 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
373 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
372 > {
374 > {
373 let (_non_normal, other_parent) =
375 let (_non_normal, other_parent) =
374 self.get_non_normal_other_parent_entries();
376 self.get_non_normal_other_parent_entries();
375 Box::new(other_parent.iter().map(|p| Ok(&**p)))
377 Box::new(other_parent.iter().map(|p| Ok(&**p)))
376 }
378 }
377
379
378 fn has_tracked_dir(
380 fn has_tracked_dir(
379 &mut self,
381 &mut self,
380 directory: &HgPath,
382 directory: &HgPath,
381 ) -> Result<bool, DirstateError> {
383 ) -> Result<bool, DirstateError> {
382 self.has_tracked_dir(directory)
384 self.has_tracked_dir(directory)
383 }
385 }
384
386
385 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
387 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
386 self.has_dir(directory)
388 self.has_dir(directory)
387 }
389 }
388
390
389 fn pack_v1(
391 fn pack_v1(
390 &mut self,
392 &mut self,
391 parents: DirstateParents,
393 parents: DirstateParents,
392 now: Timestamp,
394 now: Timestamp,
393 ) -> Result<Vec<u8>, DirstateError> {
395 ) -> Result<Vec<u8>, DirstateError> {
394 self.pack(parents, now)
396 self.pack(parents, now)
395 }
397 }
396
398
397 fn pack_v2(
399 fn pack_v2(
398 &mut self,
400 &mut self,
399 _parents: DirstateParents,
401 _parents: DirstateParents,
400 _now: Timestamp,
402 _now: Timestamp,
401 ) -> Result<Vec<u8>, DirstateError> {
403 ) -> Result<Vec<u8>, DirstateError> {
402 panic!(
404 panic!(
403 "should have used dirstate_tree::DirstateMap to use the v2 format"
405 "should have used dirstate_tree::DirstateMap to use the v2 format"
404 )
406 )
405 }
407 }
406
408
407 fn status<'a>(
409 fn status<'a>(
408 &'a mut self,
410 &'a mut self,
409 matcher: &'a (dyn Matcher + Sync),
411 matcher: &'a (dyn Matcher + Sync),
410 root_dir: PathBuf,
412 root_dir: PathBuf,
411 ignore_files: Vec<PathBuf>,
413 ignore_files: Vec<PathBuf>,
412 options: StatusOptions,
414 options: StatusOptions,
413 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
415 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
414 {
416 {
415 crate::status(self, matcher, root_dir, ignore_files, options)
417 crate::status(self, matcher, root_dir, ignore_files, options)
416 }
418 }
417
419
418 fn copy_map_len(&self) -> usize {
420 fn copy_map_len(&self) -> usize {
419 self.copy_map.len()
421 self.copy_map.len()
420 }
422 }
421
423
422 fn copy_map_iter(&self) -> CopyMapIter<'_> {
424 fn copy_map_iter(&self) -> CopyMapIter<'_> {
423 Box::new(
425 Box::new(
424 self.copy_map
426 self.copy_map
425 .iter()
427 .iter()
426 .map(|(key, value)| Ok((&**key, &**value))),
428 .map(|(key, value)| Ok((&**key, &**value))),
427 )
429 )
428 }
430 }
429
431
430 fn copy_map_contains_key(
432 fn copy_map_contains_key(
431 &self,
433 &self,
432 key: &HgPath,
434 key: &HgPath,
433 ) -> Result<bool, DirstateV2ParseError> {
435 ) -> Result<bool, DirstateV2ParseError> {
434 Ok(self.copy_map.contains_key(key))
436 Ok(self.copy_map.contains_key(key))
435 }
437 }
436
438
437 fn copy_map_get(
439 fn copy_map_get(
438 &self,
440 &self,
439 key: &HgPath,
441 key: &HgPath,
440 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
442 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
441 Ok(self.copy_map.get(key).map(|p| &**p))
443 Ok(self.copy_map.get(key).map(|p| &**p))
442 }
444 }
443
445
444 fn copy_map_remove(
446 fn copy_map_remove(
445 &mut self,
447 &mut self,
446 key: &HgPath,
448 key: &HgPath,
447 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
449 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
448 Ok(self.copy_map.remove(key))
450 Ok(self.copy_map.remove(key))
449 }
451 }
450
452
451 fn copy_map_insert(
453 fn copy_map_insert(
452 &mut self,
454 &mut self,
453 key: HgPathBuf,
455 key: HgPathBuf,
454 value: HgPathBuf,
456 value: HgPathBuf,
455 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
457 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
456 Ok(self.copy_map.insert(key, value))
458 Ok(self.copy_map.insert(key, value))
457 }
459 }
458
460
459 fn len(&self) -> usize {
461 fn len(&self) -> usize {
460 (&**self).len()
462 (&**self).len()
461 }
463 }
462
464
463 fn contains_key(
465 fn contains_key(
464 &self,
466 &self,
465 key: &HgPath,
467 key: &HgPath,
466 ) -> Result<bool, DirstateV2ParseError> {
468 ) -> Result<bool, DirstateV2ParseError> {
467 Ok((&**self).contains_key(key))
469 Ok((&**self).contains_key(key))
468 }
470 }
469
471
470 fn get(
472 fn get(
471 &self,
473 &self,
472 key: &HgPath,
474 key: &HgPath,
473 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
475 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
474 Ok((&**self).get(key).cloned())
476 Ok((&**self).get(key).cloned())
475 }
477 }
476
478
477 fn iter(&self) -> StateMapIter<'_> {
479 fn iter(&self) -> StateMapIter<'_> {
478 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
480 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
479 }
481 }
480
482
481 fn iter_directories(
483 fn iter_directories(
482 &self,
484 &self,
483 ) -> Box<
485 ) -> Box<
484 dyn Iterator<
486 dyn Iterator<
485 Item = Result<
487 Item = Result<
486 (&HgPath, Option<Timestamp>),
488 (&HgPath, Option<Timestamp>),
487 DirstateV2ParseError,
489 DirstateV2ParseError,
488 >,
490 >,
489 > + Send
491 > + Send
490 + '_,
492 + '_,
491 > {
493 > {
492 Box::new(std::iter::empty())
494 Box::new(std::iter::empty())
493 }
495 }
494 }
496 }
@@ -1,594 +1,597 b''
1 // dirstate_map.rs
1 // dirstate_map.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
9 //! `hg-core` package.
9 //! `hg-core` package.
10
10
11 use std::cell::{RefCell, RefMut};
11 use std::cell::{RefCell, RefMut};
12 use std::convert::TryInto;
12 use std::convert::TryInto;
13
13
14 use cpython::{
14 use cpython::{
15 exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
15 exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
16 PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
16 PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
17 UnsafePyLeaked,
17 UnsafePyLeaked,
18 };
18 };
19
19
20 use crate::{
20 use crate::{
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
22 dirstate::make_dirstate_tuple,
22 dirstate::make_dirstate_tuple,
23 dirstate::non_normal_entries::{
23 dirstate::non_normal_entries::{
24 NonNormalEntries, NonNormalEntriesIterator,
24 NonNormalEntries, NonNormalEntriesIterator,
25 },
25 },
26 dirstate::owning::OwningDirstateMap,
26 dirstate::owning::OwningDirstateMap,
27 parsers::dirstate_parents_to_pytuple,
27 parsers::dirstate_parents_to_pytuple,
28 };
28 };
29 use hg::{
29 use hg::{
30 dirstate::parsers::Timestamp,
30 dirstate::parsers::Timestamp,
31 dirstate::MTIME_UNSET,
31 dirstate::MTIME_UNSET,
32 dirstate::SIZE_NON_NORMAL,
32 dirstate::SIZE_NON_NORMAL,
33 dirstate_tree::dispatch::DirstateMapMethods,
33 dirstate_tree::dispatch::DirstateMapMethods,
34 dirstate_tree::on_disk::DirstateV2ParseError,
34 dirstate_tree::on_disk::DirstateV2ParseError,
35 errors::HgError,
35 errors::HgError,
36 revlog::Node,
36 revlog::Node,
37 utils::files::normalize_case,
37 utils::files::normalize_case,
38 utils::hg_path::{HgPath, HgPathBuf},
38 utils::hg_path::{HgPath, HgPathBuf},
39 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
39 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
40 DirstateParents, EntryState, StateMapIter,
40 DirstateParents, EntryState, StateMapIter,
41 };
41 };
42
42
43 // TODO
43 // TODO
44 // This object needs to share references to multiple members of its Rust
44 // This object needs to share references to multiple members of its Rust
45 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
45 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
46 // Right now `CopyMap` is done, but it needs to have an explicit reference
46 // Right now `CopyMap` is done, but it needs to have an explicit reference
47 // to `RustDirstateMap` which itself needs to have an encapsulation for
47 // to `RustDirstateMap` which itself needs to have an encapsulation for
48 // every method in `CopyMap` (copymapcopy, etc.).
48 // every method in `CopyMap` (copymapcopy, etc.).
49 // This is ugly and hard to maintain.
49 // This is ugly and hard to maintain.
50 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
50 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
51 // `py_class!` is already implemented and does not mention
51 // `py_class!` is already implemented and does not mention
52 // `RustDirstateMap`, rightfully so.
52 // `RustDirstateMap`, rightfully so.
53 // All attributes also have to have a separate refcount data attribute for
53 // All attributes also have to have a separate refcount data attribute for
54 // leaks, with all methods that go along for reference sharing.
54 // leaks, with all methods that go along for reference sharing.
55 py_class!(pub class DirstateMap |py| {
55 py_class!(pub class DirstateMap |py| {
56 @shared data inner: Box<dyn DirstateMapMethods + Send>;
56 @shared data inner: Box<dyn DirstateMapMethods + Send>;
57
57
58 /// Returns a `(dirstate_map, parents)` tuple
58 /// Returns a `(dirstate_map, parents)` tuple
59 @staticmethod
59 @staticmethod
60 def new(
60 def new(
61 use_dirstate_tree: bool,
61 use_dirstate_tree: bool,
62 use_dirstate_v2: bool,
62 use_dirstate_v2: bool,
63 on_disk: PyBytes,
63 on_disk: PyBytes,
64 ) -> PyResult<PyObject> {
64 ) -> PyResult<PyObject> {
65 let dirstate_error = |e: DirstateError| {
65 let dirstate_error = |e: DirstateError| {
66 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
66 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
67 };
67 };
68 let (inner, parents) = if use_dirstate_tree || use_dirstate_v2 {
68 let (inner, parents) = if use_dirstate_tree || use_dirstate_v2 {
69 let (map, parents) =
69 let (map, parents) =
70 OwningDirstateMap::new(py, on_disk, use_dirstate_v2)
70 OwningDirstateMap::new(py, on_disk, use_dirstate_v2)
71 .map_err(dirstate_error)?;
71 .map_err(dirstate_error)?;
72 (Box::new(map) as _, parents)
72 (Box::new(map) as _, parents)
73 } else {
73 } else {
74 let bytes = on_disk.data(py);
74 let bytes = on_disk.data(py);
75 let mut map = RustDirstateMap::default();
75 let mut map = RustDirstateMap::default();
76 let parents = map.read(bytes).map_err(dirstate_error)?;
76 let parents = map.read(bytes).map_err(dirstate_error)?;
77 (Box::new(map) as _, parents)
77 (Box::new(map) as _, parents)
78 };
78 };
79 let map = Self::create_instance(py, inner)?;
79 let map = Self::create_instance(py, inner)?;
80 let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
80 let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
81 Ok((map, parents).to_py_object(py).into_object())
81 Ok((map, parents).to_py_object(py).into_object())
82 }
82 }
83
83
84 def clear(&self) -> PyResult<PyObject> {
84 def clear(&self) -> PyResult<PyObject> {
85 self.inner(py).borrow_mut().clear();
85 self.inner(py).borrow_mut().clear();
86 Ok(py.None())
86 Ok(py.None())
87 }
87 }
88
88
89 def get(
89 def get(
90 &self,
90 &self,
91 key: PyObject,
91 key: PyObject,
92 default: Option<PyObject> = None
92 default: Option<PyObject> = None
93 ) -> PyResult<Option<PyObject>> {
93 ) -> PyResult<Option<PyObject>> {
94 let key = key.extract::<PyBytes>(py)?;
94 let key = key.extract::<PyBytes>(py)?;
95 match self
95 match self
96 .inner(py)
96 .inner(py)
97 .borrow()
97 .borrow()
98 .get(HgPath::new(key.data(py)))
98 .get(HgPath::new(key.data(py)))
99 .map_err(|e| v2_error(py, e))?
99 .map_err(|e| v2_error(py, e))?
100 {
100 {
101 Some(entry) => {
101 Some(entry) => {
102 Ok(Some(make_dirstate_tuple(py, &entry)?))
102 Ok(Some(make_dirstate_tuple(py, &entry)?))
103 },
103 },
104 None => Ok(default)
104 None => Ok(default)
105 }
105 }
106 }
106 }
107
107
108 def addfile(
108 def addfile(
109 &self,
109 &self,
110 f: PyObject,
110 f: PyObject,
111 state: PyObject,
111 state: PyObject,
112 mode: PyObject,
112 mode: PyObject,
113 size: PyObject,
113 size: PyObject,
114 mtime: PyObject,
114 mtime: PyObject,
115 added: PyObject,
115 added: PyObject,
116 merged: PyObject,
116 from_p2: PyObject,
117 from_p2: PyObject,
117 possibly_dirty: PyObject,
118 possibly_dirty: PyObject,
118 ) -> PyResult<PyObject> {
119 ) -> PyResult<PyObject> {
119 let f = f.extract::<PyBytes>(py)?;
120 let f = f.extract::<PyBytes>(py)?;
120 let filename = HgPath::new(f.data(py));
121 let filename = HgPath::new(f.data(py));
121 let state = if state.is_none(py) {
122 let state = if state.is_none(py) {
122 // Arbitrary default value
123 // Arbitrary default value
123 EntryState::Normal
124 EntryState::Normal
124 } else {
125 } else {
125 state.extract::<PyBytes>(py)?.data(py)[0]
126 state.extract::<PyBytes>(py)?.data(py)[0]
126 .try_into()
127 .try_into()
127 .map_err(|e: HgError| {
128 .map_err(|e: HgError| {
128 PyErr::new::<exc::ValueError, _>(py, e.to_string())
129 PyErr::new::<exc::ValueError, _>(py, e.to_string())
129 })?
130 })?
130 };
131 };
131 let mode = if mode.is_none(py) {
132 let mode = if mode.is_none(py) {
132 // fallback default value
133 // fallback default value
133 0
134 0
134 } else {
135 } else {
135 mode.extract(py)?
136 mode.extract(py)?
136 };
137 };
137 let size = if size.is_none(py) {
138 let size = if size.is_none(py) {
138 // fallback default value
139 // fallback default value
139 SIZE_NON_NORMAL
140 SIZE_NON_NORMAL
140 } else {
141 } else {
141 size.extract(py)?
142 size.extract(py)?
142 };
143 };
143 let mtime = if mtime.is_none(py) {
144 let mtime = if mtime.is_none(py) {
144 // fallback default value
145 // fallback default value
145 MTIME_UNSET
146 MTIME_UNSET
146 } else {
147 } else {
147 mtime.extract(py)?
148 mtime.extract(py)?
148 };
149 };
149 let entry = DirstateEntry {
150 let entry = DirstateEntry {
150 state: state,
151 state: state,
151 mode: mode,
152 mode: mode,
152 size: size,
153 size: size,
153 mtime: mtime,
154 mtime: mtime,
154 };
155 };
155 let added = added.extract::<PyBool>(py)?.is_true();
156 let added = added.extract::<PyBool>(py)?.is_true();
157 let merged = merged.extract::<PyBool>(py)?.is_true();
156 let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
158 let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
157 let possibly_dirty = possibly_dirty.extract::<PyBool>(py)?.is_true();
159 let possibly_dirty = possibly_dirty.extract::<PyBool>(py)?.is_true();
158 self.inner(py).borrow_mut().add_file(
160 self.inner(py).borrow_mut().add_file(
159 filename,
161 filename,
160 entry,
162 entry,
161 added,
163 added,
164 merged,
162 from_p2,
165 from_p2,
163 possibly_dirty
166 possibly_dirty
164 ).and(Ok(py.None())).or_else(|e: DirstateError| {
167 ).and(Ok(py.None())).or_else(|e: DirstateError| {
165 Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
168 Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
166 })
169 })
167 }
170 }
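On the Python side the binding gains a matching positional `merged` argument, extracted as a `PyBool` exactly like `added` and `from_p2`. When Python passes `None` for state, mode, size or mtime, the fallbacks above build a deliberately "unknown" entry; a standalone sketch of that fallback as a hypothetical helper, assuming the `i32` field layout of `DirstateEntry` at this revision:

use hg::dirstate::{MTIME_UNSET, SIZE_NON_NORMAL};
use hg::{DirstateEntry, EntryState};

// The same placeholder entry the binding builds when Python does not
// provide a size or mtime: both are placeholder values that mark the
// entry as needing a fresh look on the next status run.
fn placeholder_entry(state: EntryState, mode: i32) -> DirstateEntry {
    DirstateEntry {
        state,
        mode,
        size: SIZE_NON_NORMAL,
        mtime: MTIME_UNSET,
    }
}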
168
171
169 def removefile(
172 def removefile(
170 &self,
173 &self,
171 f: PyObject,
174 f: PyObject,
172 in_merge: PyObject
175 in_merge: PyObject
173 ) -> PyResult<PyObject> {
176 ) -> PyResult<PyObject> {
174 self.inner(py).borrow_mut()
177 self.inner(py).borrow_mut()
175 .remove_file(
178 .remove_file(
176 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
179 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
177 in_merge.extract::<PyBool>(py)?.is_true(),
180 in_merge.extract::<PyBool>(py)?.is_true(),
178 )
181 )
179 .or_else(|_| {
182 .or_else(|_| {
180 Err(PyErr::new::<exc::OSError, _>(
183 Err(PyErr::new::<exc::OSError, _>(
181 py,
184 py,
182 "Dirstate error".to_string(),
185 "Dirstate error".to_string(),
183 ))
186 ))
184 })?;
187 })?;
185 Ok(py.None())
188 Ok(py.None())
186 }
189 }
187
190
188 def dropfile(
191 def dropfile(
189 &self,
192 &self,
190 f: PyObject,
193 f: PyObject,
191 oldstate: PyObject
194 oldstate: PyObject
192 ) -> PyResult<PyBool> {
195 ) -> PyResult<PyBool> {
193 self.inner(py).borrow_mut()
196 self.inner(py).borrow_mut()
194 .drop_file(
197 .drop_file(
195 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
198 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
196 oldstate.extract::<PyBytes>(py)?.data(py)[0]
199 oldstate.extract::<PyBytes>(py)?.data(py)[0]
197 .try_into()
200 .try_into()
198 .map_err(|e: HgError| {
201 .map_err(|e: HgError| {
199 PyErr::new::<exc::ValueError, _>(py, e.to_string())
202 PyErr::new::<exc::ValueError, _>(py, e.to_string())
200 })?,
203 })?,
201 )
204 )
202 .and_then(|b| Ok(b.to_py_object(py)))
205 .and_then(|b| Ok(b.to_py_object(py)))
203 .or_else(|e| {
206 .or_else(|e| {
204 Err(PyErr::new::<exc::OSError, _>(
207 Err(PyErr::new::<exc::OSError, _>(
205 py,
208 py,
206 format!("Dirstate error: {}", e.to_string()),
209 format!("Dirstate error: {}", e.to_string()),
207 ))
210 ))
208 })
211 })
209 }
212 }
210
213
211 def clearambiguoustimes(
214 def clearambiguoustimes(
212 &self,
215 &self,
213 files: PyObject,
216 files: PyObject,
214 now: PyObject
217 now: PyObject
215 ) -> PyResult<PyObject> {
218 ) -> PyResult<PyObject> {
216 let files: PyResult<Vec<HgPathBuf>> = files
219 let files: PyResult<Vec<HgPathBuf>> = files
217 .iter(py)?
220 .iter(py)?
218 .map(|filename| {
221 .map(|filename| {
219 Ok(HgPathBuf::from_bytes(
222 Ok(HgPathBuf::from_bytes(
220 filename?.extract::<PyBytes>(py)?.data(py),
223 filename?.extract::<PyBytes>(py)?.data(py),
221 ))
224 ))
222 })
225 })
223 .collect();
226 .collect();
224 self.inner(py)
227 self.inner(py)
225 .borrow_mut()
228 .borrow_mut()
226 .clear_ambiguous_times(files?, now.extract(py)?)
229 .clear_ambiguous_times(files?, now.extract(py)?)
227 .map_err(|e| v2_error(py, e))?;
230 .map_err(|e| v2_error(py, e))?;
228 Ok(py.None())
231 Ok(py.None())
229 }
232 }
230
233
231 def other_parent_entries(&self) -> PyResult<PyObject> {
234 def other_parent_entries(&self) -> PyResult<PyObject> {
232 let mut inner_shared = self.inner(py).borrow_mut();
235 let mut inner_shared = self.inner(py).borrow_mut();
233 let set = PySet::empty(py)?;
236 let set = PySet::empty(py)?;
234 for path in inner_shared.iter_other_parent_paths() {
237 for path in inner_shared.iter_other_parent_paths() {
235 let path = path.map_err(|e| v2_error(py, e))?;
238 let path = path.map_err(|e| v2_error(py, e))?;
236 set.add(py, PyBytes::new(py, path.as_bytes()))?;
239 set.add(py, PyBytes::new(py, path.as_bytes()))?;
237 }
240 }
238 Ok(set.into_object())
241 Ok(set.into_object())
239 }
242 }
240
243
241 def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
244 def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
242 NonNormalEntries::from_inner(py, self.clone_ref(py))
245 NonNormalEntries::from_inner(py, self.clone_ref(py))
243 }
246 }
244
247
245 def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
248 def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
246 let key = key.extract::<PyBytes>(py)?;
249 let key = key.extract::<PyBytes>(py)?;
247 self.inner(py)
250 self.inner(py)
248 .borrow_mut()
251 .borrow_mut()
249 .non_normal_entries_contains(HgPath::new(key.data(py)))
252 .non_normal_entries_contains(HgPath::new(key.data(py)))
250 .map_err(|e| v2_error(py, e))
253 .map_err(|e| v2_error(py, e))
251 }
254 }
252
255
253 def non_normal_entries_display(&self) -> PyResult<PyString> {
256 def non_normal_entries_display(&self) -> PyResult<PyString> {
254 let mut inner = self.inner(py).borrow_mut();
257 let mut inner = self.inner(py).borrow_mut();
255 let paths = inner
258 let paths = inner
256 .iter_non_normal_paths()
259 .iter_non_normal_paths()
257 .collect::<Result<Vec<_>, _>>()
260 .collect::<Result<Vec<_>, _>>()
258 .map_err(|e| v2_error(py, e))?;
261 .map_err(|e| v2_error(py, e))?;
259 let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
262 let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
260 Ok(PyString::new(py, &formatted))
263 Ok(PyString::new(py, &formatted))
261 }
264 }
262
265
263 def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
266 def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
264 let key = key.extract::<PyBytes>(py)?;
267 let key = key.extract::<PyBytes>(py)?;
265 self
268 self
266 .inner(py)
269 .inner(py)
267 .borrow_mut()
270 .borrow_mut()
268 .non_normal_entries_remove(HgPath::new(key.data(py)));
271 .non_normal_entries_remove(HgPath::new(key.data(py)));
269 Ok(py.None())
272 Ok(py.None())
270 }
273 }
271
274
272 def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
275 def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
273 let mut inner = self.inner(py).borrow_mut();
276 let mut inner = self.inner(py).borrow_mut();
274
277
275 let ret = PyList::new(py, &[]);
278 let ret = PyList::new(py, &[]);
276 for filename in inner.non_normal_or_other_parent_paths() {
279 for filename in inner.non_normal_or_other_parent_paths() {
277 let filename = filename.map_err(|e| v2_error(py, e))?;
280 let filename = filename.map_err(|e| v2_error(py, e))?;
278 let as_pystring = PyBytes::new(py, filename.as_bytes());
281 let as_pystring = PyBytes::new(py, filename.as_bytes());
279 ret.append(py, as_pystring.into_object());
282 ret.append(py, as_pystring.into_object());
280 }
283 }
281 Ok(ret)
284 Ok(ret)
282 }
285 }
283
286
284 def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
287 def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
285 // Make sure the sets are defined before we no longer have a mutable
288 // Make sure the sets are defined before we no longer have a mutable
286 // reference to the dmap.
289 // reference to the dmap.
287 self.inner(py)
290 self.inner(py)
288 .borrow_mut()
291 .borrow_mut()
289 .set_non_normal_other_parent_entries(false);
292 .set_non_normal_other_parent_entries(false);
290
293
291 let leaked_ref = self.inner(py).leak_immutable();
294 let leaked_ref = self.inner(py).leak_immutable();
292
295
293 NonNormalEntriesIterator::from_inner(py, unsafe {
296 NonNormalEntriesIterator::from_inner(py, unsafe {
294 leaked_ref.map(py, |o| {
297 leaked_ref.map(py, |o| {
295 o.iter_non_normal_paths_panic()
298 o.iter_non_normal_paths_panic()
296 })
299 })
297 })
300 })
298 }
301 }
299
302
300 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
303 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
301 let d = d.extract::<PyBytes>(py)?;
304 let d = d.extract::<PyBytes>(py)?;
302 Ok(self.inner(py).borrow_mut()
305 Ok(self.inner(py).borrow_mut()
303 .has_tracked_dir(HgPath::new(d.data(py)))
306 .has_tracked_dir(HgPath::new(d.data(py)))
304 .map_err(|e| {
307 .map_err(|e| {
305 PyErr::new::<exc::ValueError, _>(py, e.to_string())
308 PyErr::new::<exc::ValueError, _>(py, e.to_string())
306 })?
309 })?
307 .to_py_object(py))
310 .to_py_object(py))
308 }
311 }
309
312
310 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
313 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
311 let d = d.extract::<PyBytes>(py)?;
314 let d = d.extract::<PyBytes>(py)?;
312 Ok(self.inner(py).borrow_mut()
315 Ok(self.inner(py).borrow_mut()
313 .has_dir(HgPath::new(d.data(py)))
316 .has_dir(HgPath::new(d.data(py)))
314 .map_err(|e| {
317 .map_err(|e| {
315 PyErr::new::<exc::ValueError, _>(py, e.to_string())
318 PyErr::new::<exc::ValueError, _>(py, e.to_string())
316 })?
319 })?
317 .to_py_object(py))
320 .to_py_object(py))
318 }
321 }
319
322
320 def write(
323 def write(
321 &self,
324 &self,
322 use_dirstate_v2: bool,
325 use_dirstate_v2: bool,
323 p1: PyObject,
326 p1: PyObject,
324 p2: PyObject,
327 p2: PyObject,
325 now: PyObject
328 now: PyObject
326 ) -> PyResult<PyBytes> {
329 ) -> PyResult<PyBytes> {
327 let now = Timestamp(now.extract(py)?);
330 let now = Timestamp(now.extract(py)?);
328 let parents = DirstateParents {
331 let parents = DirstateParents {
329 p1: extract_node_id(py, &p1)?,
332 p1: extract_node_id(py, &p1)?,
330 p2: extract_node_id(py, &p2)?,
333 p2: extract_node_id(py, &p2)?,
331 };
334 };
332
335
333 let mut inner = self.inner(py).borrow_mut();
336 let mut inner = self.inner(py).borrow_mut();
334 let result = if use_dirstate_v2 {
337 let result = if use_dirstate_v2 {
335 inner.pack_v2(parents, now)
338 inner.pack_v2(parents, now)
336 } else {
339 } else {
337 inner.pack_v1(parents, now)
340 inner.pack_v1(parents, now)
338 };
341 };
339 match result {
342 match result {
340 Ok(packed) => Ok(PyBytes::new(py, &packed)),
343 Ok(packed) => Ok(PyBytes::new(py, &packed)),
341 Err(_) => Err(PyErr::new::<exc::OSError, _>(
344 Err(_) => Err(PyErr::new::<exc::OSError, _>(
342 py,
345 py,
343 "Dirstate error".to_string(),
346 "Dirstate error".to_string(),
344 )),
347 )),
345 }
348 }
346 }
349 }
347
350
348 def filefoldmapasdict(&self) -> PyResult<PyDict> {
351 def filefoldmapasdict(&self) -> PyResult<PyDict> {
349 let dict = PyDict::new(py);
352 let dict = PyDict::new(py);
350 for item in self.inner(py).borrow_mut().iter() {
353 for item in self.inner(py).borrow_mut().iter() {
351 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
354 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
352 if entry.state != EntryState::Removed {
355 if entry.state != EntryState::Removed {
353 let key = normalize_case(path);
356 let key = normalize_case(path);
354 let value = path;
357 let value = path;
355 dict.set_item(
358 dict.set_item(
356 py,
359 py,
357 PyBytes::new(py, key.as_bytes()).into_object(),
360 PyBytes::new(py, key.as_bytes()).into_object(),
358 PyBytes::new(py, value.as_bytes()).into_object(),
361 PyBytes::new(py, value.as_bytes()).into_object(),
359 )?;
362 )?;
360 }
363 }
361 }
364 }
362 Ok(dict)
365 Ok(dict)
363 }
366 }
364
367
365 def __len__(&self) -> PyResult<usize> {
368 def __len__(&self) -> PyResult<usize> {
366 Ok(self.inner(py).borrow().len())
369 Ok(self.inner(py).borrow().len())
367 }
370 }
368
371
369 def __contains__(&self, key: PyObject) -> PyResult<bool> {
372 def __contains__(&self, key: PyObject) -> PyResult<bool> {
370 let key = key.extract::<PyBytes>(py)?;
373 let key = key.extract::<PyBytes>(py)?;
371 self.inner(py)
374 self.inner(py)
372 .borrow()
375 .borrow()
373 .contains_key(HgPath::new(key.data(py)))
376 .contains_key(HgPath::new(key.data(py)))
374 .map_err(|e| v2_error(py, e))
377 .map_err(|e| v2_error(py, e))
375 }
378 }
376
379
377 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
380 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
378 let key = key.extract::<PyBytes>(py)?;
381 let key = key.extract::<PyBytes>(py)?;
379 let key = HgPath::new(key.data(py));
382 let key = HgPath::new(key.data(py));
380 match self
383 match self
381 .inner(py)
384 .inner(py)
382 .borrow()
385 .borrow()
383 .get(key)
386 .get(key)
384 .map_err(|e| v2_error(py, e))?
387 .map_err(|e| v2_error(py, e))?
385 {
388 {
386 Some(entry) => {
389 Some(entry) => {
387 Ok(make_dirstate_tuple(py, &entry)?)
390 Ok(make_dirstate_tuple(py, &entry)?)
388 },
391 },
389 None => Err(PyErr::new::<exc::KeyError, _>(
392 None => Err(PyErr::new::<exc::KeyError, _>(
390 py,
393 py,
391 String::from_utf8_lossy(key.as_bytes()),
394 String::from_utf8_lossy(key.as_bytes()),
392 )),
395 )),
393 }
396 }
394 }
397 }
395
398
396 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
399 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
397 let leaked_ref = self.inner(py).leak_immutable();
400 let leaked_ref = self.inner(py).leak_immutable();
398 DirstateMapKeysIterator::from_inner(
401 DirstateMapKeysIterator::from_inner(
399 py,
402 py,
400 unsafe { leaked_ref.map(py, |o| o.iter()) },
403 unsafe { leaked_ref.map(py, |o| o.iter()) },
401 )
404 )
402 }
405 }
403
406
404 def items(&self) -> PyResult<DirstateMapItemsIterator> {
407 def items(&self) -> PyResult<DirstateMapItemsIterator> {
405 let leaked_ref = self.inner(py).leak_immutable();
408 let leaked_ref = self.inner(py).leak_immutable();
406 DirstateMapItemsIterator::from_inner(
409 DirstateMapItemsIterator::from_inner(
407 py,
410 py,
408 unsafe { leaked_ref.map(py, |o| o.iter()) },
411 unsafe { leaked_ref.map(py, |o| o.iter()) },
409 )
412 )
410 }
413 }
411
414
412 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
415 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
413 let leaked_ref = self.inner(py).leak_immutable();
416 let leaked_ref = self.inner(py).leak_immutable();
414 DirstateMapKeysIterator::from_inner(
417 DirstateMapKeysIterator::from_inner(
415 py,
418 py,
416 unsafe { leaked_ref.map(py, |o| o.iter()) },
419 unsafe { leaked_ref.map(py, |o| o.iter()) },
417 )
420 )
418 }
421 }
419
422
420 // TODO all copymap* methods, see docstring above
423 // TODO all copymap* methods, see docstring above
421 def copymapcopy(&self) -> PyResult<PyDict> {
424 def copymapcopy(&self) -> PyResult<PyDict> {
422 let dict = PyDict::new(py);
425 let dict = PyDict::new(py);
423 for item in self.inner(py).borrow().copy_map_iter() {
426 for item in self.inner(py).borrow().copy_map_iter() {
424 let (key, value) = item.map_err(|e| v2_error(py, e))?;
427 let (key, value) = item.map_err(|e| v2_error(py, e))?;
425 dict.set_item(
428 dict.set_item(
426 py,
429 py,
427 PyBytes::new(py, key.as_bytes()),
430 PyBytes::new(py, key.as_bytes()),
428 PyBytes::new(py, value.as_bytes()),
431 PyBytes::new(py, value.as_bytes()),
429 )?;
432 )?;
430 }
433 }
431 Ok(dict)
434 Ok(dict)
432 }
435 }
433
436
434 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
437 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
435 let key = key.extract::<PyBytes>(py)?;
438 let key = key.extract::<PyBytes>(py)?;
436 match self
439 match self
437 .inner(py)
440 .inner(py)
438 .borrow()
441 .borrow()
439 .copy_map_get(HgPath::new(key.data(py)))
442 .copy_map_get(HgPath::new(key.data(py)))
440 .map_err(|e| v2_error(py, e))?
443 .map_err(|e| v2_error(py, e))?
441 {
444 {
442 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
445 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
443 None => Err(PyErr::new::<exc::KeyError, _>(
446 None => Err(PyErr::new::<exc::KeyError, _>(
444 py,
447 py,
445 String::from_utf8_lossy(key.data(py)),
448 String::from_utf8_lossy(key.data(py)),
446 )),
449 )),
447 }
450 }
448 }
451 }
449 def copymap(&self) -> PyResult<CopyMap> {
452 def copymap(&self) -> PyResult<CopyMap> {
450 CopyMap::from_inner(py, self.clone_ref(py))
453 CopyMap::from_inner(py, self.clone_ref(py))
451 }
454 }
452
455
453 def copymaplen(&self) -> PyResult<usize> {
456 def copymaplen(&self) -> PyResult<usize> {
454 Ok(self.inner(py).borrow().copy_map_len())
457 Ok(self.inner(py).borrow().copy_map_len())
455 }
458 }
456 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
459 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
457 let key = key.extract::<PyBytes>(py)?;
460 let key = key.extract::<PyBytes>(py)?;
458 self.inner(py)
461 self.inner(py)
459 .borrow()
462 .borrow()
460 .copy_map_contains_key(HgPath::new(key.data(py)))
463 .copy_map_contains_key(HgPath::new(key.data(py)))
461 .map_err(|e| v2_error(py, e))
464 .map_err(|e| v2_error(py, e))
462 }
465 }
463 def copymapget(
466 def copymapget(
464 &self,
467 &self,
465 key: PyObject,
468 key: PyObject,
466 default: Option<PyObject>
469 default: Option<PyObject>
467 ) -> PyResult<Option<PyObject>> {
470 ) -> PyResult<Option<PyObject>> {
468 let key = key.extract::<PyBytes>(py)?;
471 let key = key.extract::<PyBytes>(py)?;
469 match self
472 match self
470 .inner(py)
473 .inner(py)
471 .borrow()
474 .borrow()
472 .copy_map_get(HgPath::new(key.data(py)))
475 .copy_map_get(HgPath::new(key.data(py)))
473 .map_err(|e| v2_error(py, e))?
476 .map_err(|e| v2_error(py, e))?
474 {
477 {
475 Some(copy) => Ok(Some(
478 Some(copy) => Ok(Some(
476 PyBytes::new(py, copy.as_bytes()).into_object(),
479 PyBytes::new(py, copy.as_bytes()).into_object(),
477 )),
480 )),
478 None => Ok(default),
481 None => Ok(default),
479 }
482 }
480 }
483 }
481 def copymapsetitem(
484 def copymapsetitem(
482 &self,
485 &self,
483 key: PyObject,
486 key: PyObject,
484 value: PyObject
487 value: PyObject
485 ) -> PyResult<PyObject> {
488 ) -> PyResult<PyObject> {
486 let key = key.extract::<PyBytes>(py)?;
489 let key = key.extract::<PyBytes>(py)?;
487 let value = value.extract::<PyBytes>(py)?;
490 let value = value.extract::<PyBytes>(py)?;
488 self.inner(py)
491 self.inner(py)
489 .borrow_mut()
492 .borrow_mut()
490 .copy_map_insert(
493 .copy_map_insert(
491 HgPathBuf::from_bytes(key.data(py)),
494 HgPathBuf::from_bytes(key.data(py)),
492 HgPathBuf::from_bytes(value.data(py)),
495 HgPathBuf::from_bytes(value.data(py)),
493 )
496 )
494 .map_err(|e| v2_error(py, e))?;
497 .map_err(|e| v2_error(py, e))?;
495 Ok(py.None())
498 Ok(py.None())
496 }
499 }
497 def copymappop(
500 def copymappop(
498 &self,
501 &self,
499 key: PyObject,
502 key: PyObject,
500 default: Option<PyObject>
503 default: Option<PyObject>
501 ) -> PyResult<Option<PyObject>> {
504 ) -> PyResult<Option<PyObject>> {
502 let key = key.extract::<PyBytes>(py)?;
505 let key = key.extract::<PyBytes>(py)?;
503 match self
506 match self
504 .inner(py)
507 .inner(py)
505 .borrow_mut()
508 .borrow_mut()
506 .copy_map_remove(HgPath::new(key.data(py)))
509 .copy_map_remove(HgPath::new(key.data(py)))
507 .map_err(|e| v2_error(py, e))?
510 .map_err(|e| v2_error(py, e))?
508 {
511 {
509 Some(_) => Ok(None),
512 Some(_) => Ok(None),
510 None => Ok(default),
513 None => Ok(default),
511 }
514 }
512 }
515 }
513
516
514 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
517 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
515 let leaked_ref = self.inner(py).leak_immutable();
518 let leaked_ref = self.inner(py).leak_immutable();
516 CopyMapKeysIterator::from_inner(
519 CopyMapKeysIterator::from_inner(
517 py,
520 py,
518 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
521 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
519 )
522 )
520 }
523 }
521
524
522 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
525 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
523 let leaked_ref = self.inner(py).leak_immutable();
526 let leaked_ref = self.inner(py).leak_immutable();
524 CopyMapItemsIterator::from_inner(
527 CopyMapItemsIterator::from_inner(
525 py,
528 py,
526 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
529 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
527 )
530 )
528 }
531 }
529
532
530 def directories(&self) -> PyResult<PyList> {
533 def directories(&self) -> PyResult<PyList> {
531 let dirs = PyList::new(py, &[]);
534 let dirs = PyList::new(py, &[]);
532 for item in self.inner(py).borrow().iter_directories() {
535 for item in self.inner(py).borrow().iter_directories() {
533 let (path, mtime) = item.map_err(|e| v2_error(py, e))?;
536 let (path, mtime) = item.map_err(|e| v2_error(py, e))?;
534 let path = PyBytes::new(py, path.as_bytes());
537 let path = PyBytes::new(py, path.as_bytes());
535 let mtime = mtime.map(|t| t.0).unwrap_or(-1);
538 let mtime = mtime.map(|t| t.0).unwrap_or(-1);
536 let tuple = (path, (b'd', 0, 0, mtime));
539 let tuple = (path, (b'd', 0, 0, mtime));
537 dirs.append(py, tuple.to_py_object(py).into_object())
540 dirs.append(py, tuple.to_py_object(py).into_object())
538 }
541 }
539 Ok(dirs)
542 Ok(dirs)
540 }
543 }
541
544
542 });
545 });
543
546
544 impl DirstateMap {
547 impl DirstateMap {
545 pub fn get_inner_mut<'a>(
548 pub fn get_inner_mut<'a>(
546 &'a self,
549 &'a self,
547 py: Python<'a>,
550 py: Python<'a>,
548 ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
551 ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
549 self.inner(py).borrow_mut()
552 self.inner(py).borrow_mut()
550 }
553 }
551 fn translate_key(
554 fn translate_key(
552 py: Python,
555 py: Python,
553 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
556 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
554 ) -> PyResult<Option<PyBytes>> {
557 ) -> PyResult<Option<PyBytes>> {
555 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
558 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
556 Ok(Some(PyBytes::new(py, f.as_bytes())))
559 Ok(Some(PyBytes::new(py, f.as_bytes())))
557 }
560 }
558 fn translate_key_value(
561 fn translate_key_value(
559 py: Python,
562 py: Python,
560 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
563 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
561 ) -> PyResult<Option<(PyBytes, PyObject)>> {
564 ) -> PyResult<Option<(PyBytes, PyObject)>> {
562 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
565 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
563 Ok(Some((
566 Ok(Some((
564 PyBytes::new(py, f.as_bytes()),
567 PyBytes::new(py, f.as_bytes()),
565 make_dirstate_tuple(py, &entry)?,
568 make_dirstate_tuple(py, &entry)?,
566 )))
569 )))
567 }
570 }
568 }
571 }
569
572
570 py_shared_iterator!(
573 py_shared_iterator!(
571 DirstateMapKeysIterator,
574 DirstateMapKeysIterator,
572 UnsafePyLeaked<StateMapIter<'static>>,
575 UnsafePyLeaked<StateMapIter<'static>>,
573 DirstateMap::translate_key,
576 DirstateMap::translate_key,
574 Option<PyBytes>
577 Option<PyBytes>
575 );
578 );
576
579
577 py_shared_iterator!(
580 py_shared_iterator!(
578 DirstateMapItemsIterator,
581 DirstateMapItemsIterator,
579 UnsafePyLeaked<StateMapIter<'static>>,
582 UnsafePyLeaked<StateMapIter<'static>>,
580 DirstateMap::translate_key_value,
583 DirstateMap::translate_key_value,
581 Option<(PyBytes, PyObject)>
584 Option<(PyBytes, PyObject)>
582 );
585 );
583
586
584 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
587 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
585 let bytes = obj.extract::<PyBytes>(py)?;
588 let bytes = obj.extract::<PyBytes>(py)?;
586 match bytes.data(py).try_into() {
589 match bytes.data(py).try_into() {
587 Ok(s) => Ok(s),
590 Ok(s) => Ok(s),
588 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
591 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
589 }
592 }
590 }
593 }
591
594
592 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
595 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
593 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
596 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
594 }
597 }
@@ -1,222 +1,224 b''
1 use crate::dirstate::owning::OwningDirstateMap;
1 use crate::dirstate::owning::OwningDirstateMap;
2 use hg::dirstate::parsers::Timestamp;
2 use hg::dirstate::parsers::Timestamp;
3 use hg::dirstate_tree::dispatch::DirstateMapMethods;
3 use hg::dirstate_tree::dispatch::DirstateMapMethods;
4 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
4 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
5 use hg::matchers::Matcher;
5 use hg::matchers::Matcher;
6 use hg::utils::hg_path::{HgPath, HgPathBuf};
6 use hg::utils::hg_path::{HgPath, HgPathBuf};
7 use hg::CopyMapIter;
7 use hg::CopyMapIter;
8 use hg::DirstateEntry;
8 use hg::DirstateEntry;
9 use hg::DirstateError;
9 use hg::DirstateError;
10 use hg::DirstateParents;
10 use hg::DirstateParents;
11 use hg::DirstateStatus;
11 use hg::DirstateStatus;
12 use hg::EntryState;
12 use hg::EntryState;
13 use hg::PatternFileWarning;
13 use hg::PatternFileWarning;
14 use hg::StateMapIter;
14 use hg::StateMapIter;
15 use hg::StatusError;
15 use hg::StatusError;
16 use hg::StatusOptions;
16 use hg::StatusOptions;
17 use std::path::PathBuf;
17 use std::path::PathBuf;
18
18
19 impl DirstateMapMethods for OwningDirstateMap {
19 impl DirstateMapMethods for OwningDirstateMap {
20 fn clear(&mut self) {
20 fn clear(&mut self) {
21 self.get_mut().clear()
21 self.get_mut().clear()
22 }
22 }
23
23
24 fn add_file(
24 fn add_file(
25 &mut self,
25 &mut self,
26 filename: &HgPath,
26 filename: &HgPath,
27 entry: DirstateEntry,
27 entry: DirstateEntry,
28 added: bool,
28 added: bool,
29 merged: bool,
29 from_p2: bool,
30 from_p2: bool,
30 possibly_dirty: bool,
31 possibly_dirty: bool,
31 ) -> Result<(), DirstateError> {
32 ) -> Result<(), DirstateError> {
32 self.get_mut().add_file(
33 self.get_mut().add_file(
33 filename,
34 filename,
34 entry,
35 entry,
35 added,
36 added,
37 merged,
36 from_p2,
38 from_p2,
37 possibly_dirty,
39 possibly_dirty,
38 )
40 )
39 }
41 }
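The owning wrapper has no logic of its own: every method, including the updated `add_file`, forwards to the borrowed map via `get_mut()`/`get()`. Because the flat `RustDirstateMap`, the tree-based `dirstate_tree::DirstateMap`, and `OwningDirstateMap` all implement the trait, generic code works with any of them; a small sketch with a hypothetical helper name, using `remove_file` as shown in the forwarding just below:

use hg::dirstate_tree::dispatch::DirstateMapMethods;
use hg::utils::hg_path::HgPath;
use hg::DirstateError;

// Statically dispatched twin of the boxed-trait usage shown earlier:
// works for any concrete dirstate map type.
fn mark_removed<M: DirstateMapMethods>(
    map: &mut M,
    filename: &HgPath,
    in_merge: bool,
) -> Result<(), DirstateError> {
    map.remove_file(filename, in_merge)
}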
40
42
41 fn remove_file(
43 fn remove_file(
42 &mut self,
44 &mut self,
43 filename: &HgPath,
45 filename: &HgPath,
44 in_merge: bool,
46 in_merge: bool,
45 ) -> Result<(), DirstateError> {
47 ) -> Result<(), DirstateError> {
46 self.get_mut().remove_file(filename, in_merge)
48 self.get_mut().remove_file(filename, in_merge)
47 }
49 }
48
50
49 fn drop_file(
51 fn drop_file(
50 &mut self,
52 &mut self,
51 filename: &HgPath,
53 filename: &HgPath,
52 old_state: EntryState,
54 old_state: EntryState,
53 ) -> Result<bool, DirstateError> {
55 ) -> Result<bool, DirstateError> {
54 self.get_mut().drop_file(filename, old_state)
56 self.get_mut().drop_file(filename, old_state)
55 }
57 }
56
58
57 fn clear_ambiguous_times(
59 fn clear_ambiguous_times(
58 &mut self,
60 &mut self,
59 filenames: Vec<HgPathBuf>,
61 filenames: Vec<HgPathBuf>,
60 now: i32,
62 now: i32,
61 ) -> Result<(), DirstateV2ParseError> {
63 ) -> Result<(), DirstateV2ParseError> {
62 self.get_mut().clear_ambiguous_times(filenames, now)
64 self.get_mut().clear_ambiguous_times(filenames, now)
63 }
65 }
64
66
65 fn non_normal_entries_contains(
67 fn non_normal_entries_contains(
66 &mut self,
68 &mut self,
67 key: &HgPath,
69 key: &HgPath,
68 ) -> Result<bool, DirstateV2ParseError> {
70 ) -> Result<bool, DirstateV2ParseError> {
69 self.get_mut().non_normal_entries_contains(key)
71 self.get_mut().non_normal_entries_contains(key)
70 }
72 }
71
73
72 fn non_normal_entries_remove(&mut self, key: &HgPath) {
74 fn non_normal_entries_remove(&mut self, key: &HgPath) {
73 self.get_mut().non_normal_entries_remove(key)
75 self.get_mut().non_normal_entries_remove(key)
74 }
76 }
75
77
76 fn non_normal_or_other_parent_paths(
78 fn non_normal_or_other_parent_paths(
77 &mut self,
79 &mut self,
78 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
80 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
79 {
81 {
80 self.get_mut().non_normal_or_other_parent_paths()
82 self.get_mut().non_normal_or_other_parent_paths()
81 }
83 }
82
84
83 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
85 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
84 self.get_mut().set_non_normal_other_parent_entries(force)
86 self.get_mut().set_non_normal_other_parent_entries(force)
85 }
87 }
86
88
87 fn iter_non_normal_paths(
89 fn iter_non_normal_paths(
88 &mut self,
90 &mut self,
89 ) -> Box<
91 ) -> Box<
90 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
92 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
91 > {
93 > {
92 self.get_mut().iter_non_normal_paths()
94 self.get_mut().iter_non_normal_paths()
93 }
95 }
94
96
95 fn iter_non_normal_paths_panic(
97 fn iter_non_normal_paths_panic(
96 &self,
98 &self,
97 ) -> Box<
99 ) -> Box<
98 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
100 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
99 > {
101 > {
100 self.get().iter_non_normal_paths_panic()
102 self.get().iter_non_normal_paths_panic()
101 }
103 }
102
104
103 fn iter_other_parent_paths(
105 fn iter_other_parent_paths(
104 &mut self,
106 &mut self,
105 ) -> Box<
107 ) -> Box<
106 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
108 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
107 > {
109 > {
108 self.get_mut().iter_other_parent_paths()
110 self.get_mut().iter_other_parent_paths()
109 }
111 }
110
112
111 fn has_tracked_dir(
113 fn has_tracked_dir(
112 &mut self,
114 &mut self,
113 directory: &HgPath,
115 directory: &HgPath,
114 ) -> Result<bool, DirstateError> {
116 ) -> Result<bool, DirstateError> {
115 self.get_mut().has_tracked_dir(directory)
117 self.get_mut().has_tracked_dir(directory)
116 }
118 }
117
119
118 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
120 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
119 self.get_mut().has_dir(directory)
121 self.get_mut().has_dir(directory)
120 }
122 }
121
123
122 fn pack_v1(
124 fn pack_v1(
123 &mut self,
125 &mut self,
124 parents: DirstateParents,
126 parents: DirstateParents,
125 now: Timestamp,
127 now: Timestamp,
126 ) -> Result<Vec<u8>, DirstateError> {
128 ) -> Result<Vec<u8>, DirstateError> {
127 self.get_mut().pack_v1(parents, now)
129 self.get_mut().pack_v1(parents, now)
128 }
130 }
129
131
130 fn pack_v2(
132 fn pack_v2(
131 &mut self,
133 &mut self,
132 parents: DirstateParents,
134 parents: DirstateParents,
133 now: Timestamp,
135 now: Timestamp,
134 ) -> Result<Vec<u8>, DirstateError> {
136 ) -> Result<Vec<u8>, DirstateError> {
135 self.get_mut().pack_v2(parents, now)
137 self.get_mut().pack_v2(parents, now)
136 }
138 }
137
139
138 fn status<'a>(
140 fn status<'a>(
139 &'a mut self,
141 &'a mut self,
140 matcher: &'a (dyn Matcher + Sync),
142 matcher: &'a (dyn Matcher + Sync),
141 root_dir: PathBuf,
143 root_dir: PathBuf,
142 ignore_files: Vec<PathBuf>,
144 ignore_files: Vec<PathBuf>,
143 options: StatusOptions,
145 options: StatusOptions,
144 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
146 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
145 {
147 {
146 self.get_mut()
148 self.get_mut()
147 .status(matcher, root_dir, ignore_files, options)
149 .status(matcher, root_dir, ignore_files, options)
148 }
150 }
149
151
150 fn copy_map_len(&self) -> usize {
152 fn copy_map_len(&self) -> usize {
151 self.get().copy_map_len()
153 self.get().copy_map_len()
152 }
154 }
153
155
154 fn copy_map_iter(&self) -> CopyMapIter<'_> {
156 fn copy_map_iter(&self) -> CopyMapIter<'_> {
155 self.get().copy_map_iter()
157 self.get().copy_map_iter()
156 }
158 }
157
159
158 fn copy_map_contains_key(
160 fn copy_map_contains_key(
159 &self,
161 &self,
160 key: &HgPath,
162 key: &HgPath,
161 ) -> Result<bool, DirstateV2ParseError> {
163 ) -> Result<bool, DirstateV2ParseError> {
162 self.get().copy_map_contains_key(key)
164 self.get().copy_map_contains_key(key)
163 }
165 }
164
166
165 fn copy_map_get(
167 fn copy_map_get(
166 &self,
168 &self,
167 key: &HgPath,
169 key: &HgPath,
168 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
170 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
169 self.get().copy_map_get(key)
171 self.get().copy_map_get(key)
170 }
172 }
171
173
172 fn copy_map_remove(
174 fn copy_map_remove(
173 &mut self,
175 &mut self,
174 key: &HgPath,
176 key: &HgPath,
175 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
177 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
176 self.get_mut().copy_map_remove(key)
178 self.get_mut().copy_map_remove(key)
177 }
179 }
178
180
179 fn copy_map_insert(
181 fn copy_map_insert(
180 &mut self,
182 &mut self,
181 key: HgPathBuf,
183 key: HgPathBuf,
182 value: HgPathBuf,
184 value: HgPathBuf,
183 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
185 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
184 self.get_mut().copy_map_insert(key, value)
186 self.get_mut().copy_map_insert(key, value)
185 }
187 }
186
188
187 fn len(&self) -> usize {
189 fn len(&self) -> usize {
188 self.get().len()
190 self.get().len()
189 }
191 }
190
192
191 fn contains_key(
193 fn contains_key(
192 &self,
194 &self,
193 key: &HgPath,
195 key: &HgPath,
194 ) -> Result<bool, DirstateV2ParseError> {
196 ) -> Result<bool, DirstateV2ParseError> {
195 self.get().contains_key(key)
197 self.get().contains_key(key)
196 }
198 }
197
199
198 fn get(
200 fn get(
199 &self,
201 &self,
200 key: &HgPath,
202 key: &HgPath,
201 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
203 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
202 self.get().get(key)
204 self.get().get(key)
203 }
205 }
204
206
205 fn iter(&self) -> StateMapIter<'_> {
207 fn iter(&self) -> StateMapIter<'_> {
206 self.get().iter()
208 self.get().iter()
207 }
209 }
208
210
209 fn iter_directories(
211 fn iter_directories(
210 &self,
212 &self,
211 ) -> Box<
213 ) -> Box<
212 dyn Iterator<
214 dyn Iterator<
213 Item = Result<
215 Item = Result<
214 (&HgPath, Option<Timestamp>),
216 (&HgPath, Option<Timestamp>),
215 DirstateV2ParseError,
217 DirstateV2ParseError,
216 >,
218 >,
217 > + Send
219 > + Send
218 + '_,
220 + '_,
219 > {
221 > {
220 self.get().iter_directories()
222 self.get().iter_directories()
221 }
223 }
222 }
224 }