dirstate-map: move most of `dirstate.update_file` logic in the dsmap...
marmoute - r48492:e5fb14a0 default
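
To make the hunk below easier to follow: after this change, `dirstate.update_file` only computes the optional `parentfiledata` (and the `_lastnormaltime` bookkeeping) and then delegates the actual state transition to the dirstate map's `reset_state` method. The following is a minimal, self-contained sketch of that new shape, assuming simplified stand-in classes (`FakeDirstateMap`, `FakeDirstate`) that are not part of Mercurial; the map-side `reset_state` body is illustrative only, since it is not shown in this diff.

class FakeDirstateMap:
    """Stand-in for dirstatemap.dirstatemap; stores per-file state."""

    def __init__(self):
        self._state = {}

    def reset_state(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        # Illustrative only: the real method implements the add/remove/drop
        # decision tree that used to live in dirstate.update_file.
        self._state[filename] = (
            wc_tracked,
            p1_tracked,
            p2_tracked,
            merged,
            clean_p1,
            clean_p2,
            possibly_dirty,
            parentfiledata,
        )


class FakeDirstate:
    """Stand-in for dirstate.dirstate, showing the new delegation."""

    def __init__(self):
        self._map = FakeDirstateMap()
        self._dirty = False
        self._updatedfiles = set()
        self._lastnormaltime = 0

    def _get_filedata(self, filename):
        # The real method lstat()s the file; constants keep the sketch runnable.
        return (0o644, 0, 0)

    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        self._dirty = True
        self._updatedfiles.add(filename)
        # stat data is only needed when the entry will be recorded as clean
        # relative to p1 (mirrors the new hunk below)
        need_parent_file_data = (
            not (possibly_dirty or clean_p2 or merged)
            and wc_tracked
            and p1_tracked
        )
        if need_parent_file_data:
            if parentfiledata is None:
                parentfiledata = self._get_filedata(filename)
            mtime = parentfiledata[2]
            if mtime > self._lastnormaltime:
                self._lastnormaltime = mtime
        # everything else is handed to the map
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_tracked=p2_tracked,
            merged=merged,
            clean_p1=clean_p1,
            clean_p2=clean_p2,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )


ds = FakeDirstate()
ds.update_file(b'foo.txt', wc_tracked=True, p1_tracked=True)
print(ds._map._state[b'foo.txt'])

The real `update_file` body appears at new lines 567-607 in the hunk below.
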
@@ -1,1624 +1,1640 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
51 class repocache(filecache):
51 class repocache(filecache):
52 """filecache for files in .hg/"""
52 """filecache for files in .hg/"""
53
53
54 def join(self, obj, fname):
54 def join(self, obj, fname):
55 return obj._opener.join(fname)
55 return obj._opener.join(fname)
56
56
57
57
58 class rootcache(filecache):
58 class rootcache(filecache):
59 """filecache for files in the repository root"""
59 """filecache for files in the repository root"""
60
60
61 def join(self, obj, fname):
61 def join(self, obj, fname):
62 return obj._join(fname)
62 return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
75 def requires_parents_change(func):
75 def requires_parents_change(func):
76 def wrap(self, *args, **kwargs):
76 def wrap(self, *args, **kwargs):
77 if not self.pendingparentchange():
77 if not self.pendingparentchange():
78 msg = 'calling `%s` outside of a parentchange context'
78 msg = 'calling `%s` outside of a parentchange context'
79 msg %= func.__name__
79 msg %= func.__name__
80 raise error.ProgrammingError(msg)
80 raise error.ProgrammingError(msg)
81 return func(self, *args, **kwargs)
81 return func(self, *args, **kwargs)
82
82
83 return wrap
83 return wrap
84
84
85
85
86 def requires_no_parents_change(func):
86 def requires_no_parents_change(func):
87 def wrap(self, *args, **kwargs):
87 def wrap(self, *args, **kwargs):
88 if self.pendingparentchange():
88 if self.pendingparentchange():
89 msg = 'calling `%s` inside of a parentchange context'
89 msg = 'calling `%s` inside of a parentchange context'
90 msg %= func.__name__
90 msg %= func.__name__
91 raise error.ProgrammingError(msg)
91 raise error.ProgrammingError(msg)
92 return func(self, *args, **kwargs)
92 return func(self, *args, **kwargs)
93
93
94 return wrap
94 return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._updatedfiles = set()
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
242 if self._checklink and self._checkexec:
243
243
244 def f(x):
244 def f(x):
245 try:
245 try:
246 st = os.lstat(self._join(x))
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
247 if util.statislink(st):
248 return b'l'
248 return b'l'
249 if util.statisexec(st):
249 if util.statisexec(st):
250 return b'x'
250 return b'x'
251 except OSError:
251 except OSError:
252 pass
252 pass
253 return b''
253 return b''
254
254
255 return f
255 return f
256
256
257 fallback = buildfallback()
257 fallback = buildfallback()
258 if self._checklink:
258 if self._checklink:
259
259
260 def f(x):
260 def f(x):
261 if os.path.islink(self._join(x)):
261 if os.path.islink(self._join(x)):
262 return b'l'
262 return b'l'
263 if b'x' in fallback(x):
263 if b'x' in fallback(x):
264 return b'x'
264 return b'x'
265 return b''
265 return b''
266
266
267 return f
267 return f
268 if self._checkexec:
268 if self._checkexec:
269
269
270 def f(x):
270 def f(x):
271 if b'l' in fallback(x):
271 if b'l' in fallback(x):
272 return b'l'
272 return b'l'
273 if util.isexec(self._join(x)):
273 if util.isexec(self._join(x)):
274 return b'x'
274 return b'x'
275 return b''
275 return b''
276
276
277 return f
277 return f
278 else:
278 else:
279 return fallback
279 return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. We should
327 XXX The "state" is a bit obscure to be in the "public" API. We should
328 consider migrating all users of this to go through the dirstate entry
328 consider migrating all users of this to go through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries are
370 When moving from two parents to one, "merged" entries are
371 adjusted to normal and previous copy records discarded and
371 adjusted to normal and previous copy records discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 self._map.setparents(p1, p2)
388 self._map.setparents(p1, p2)
389 copies = {}
389 copies = {}
390 if (
390 if (
391 oldp2 != self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
395
396 for f in candidatefiles:
396 for f in candidatefiles:
397 s = self._map.get(f)
397 s = self._map.get(f)
398 if s is None:
398 if s is None:
399 continue
399 continue
400
400
401 # Discard "merged" markers when moving away from a merge state
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
402 if s.merged:
403 source = self._map.copymap.get(f)
403 source = self._map.copymap.get(f)
404 if source:
404 if source:
405 copies[f] = source
405 copies[f] = source
406 self.normallookup(f)
406 self.normallookup(f)
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._add(f)
413 return copies
413 return copies
414
414
415 def setbranch(self, branch):
415 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 try:
418 try:
419 f.write(self._branch + b'\n')
419 f.write(self._branch + b'\n')
420 f.close()
420 f.close()
421
421
422 # make sure filecache has the correct stat info for _branch after
422 # make sure filecache has the correct stat info for _branch after
423 # replacing the underlying file
423 # replacing the underlying file
424 ce = self._filecache[b'_branch']
424 ce = self._filecache[b'_branch']
425 if ce:
425 if ce:
426 ce.refresh()
426 ce.refresh()
427 except: # re-raises
427 except: # re-raises
428 f.discard()
428 f.discard()
429 raise
429 raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of "update/merge" case. For
469 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True if the file was previously untracked, False otherwise.
472 return True if the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self.normallookup(filename)
479 self.normallookup(filename)
480 return True
480 return True
481 return False
481 return False
482
482
483 @requires_no_parents_change
483 @requires_no_parents_change
484 def set_untracked(self, filename):
484 def set_untracked(self, filename):
485 """a "public" method for generic code to mark a file as untracked
485 """a "public" method for generic code to mark a file as untracked
486
486
487 This function is to be called outside of "update/merge" case. For
487 This function is to be called outside of "update/merge" case. For
488 example by a command like `hg remove X`.
488 example by a command like `hg remove X`.
489
489
490 return True if the file was previously tracked, False otherwise.
490 return True if the file was previously tracked, False otherwise.
491 """
491 """
492 entry = self._map.get(filename)
492 entry = self._map.get(filename)
493 if entry is None:
493 if entry is None:
494 return False
494 return False
495 elif entry.added:
495 elif entry.added:
496 self._drop(filename)
496 self._drop(filename)
497 return True
497 return True
498 else:
498 else:
499 self._remove(filename)
499 self._remove(filename)
500 return True
500 return True
501
501
502 @requires_parents_change
502 @requires_parents_change
503 def update_file_reference(
503 def update_file_reference(
504 self,
504 self,
505 filename,
505 filename,
506 p1_tracked,
506 p1_tracked,
507 ):
507 ):
508 """Set a file as tracked in the parent (or not)
508 """Set a file as tracked in the parent (or not)
509
509
510 This is to be called when adjusting the dirstate to a new parent after a history
510 This is to be called when adjusting the dirstate to a new parent after a history
511 rewriting operation.
511 rewriting operation.
512
512
513 It should not be called during a merge (p2 != nullid) and only within
513 It should not be called during a merge (p2 != nullid) and only within
514 a `with dirstate.parentchange():` context.
514 a `with dirstate.parentchange():` context.
515 """
515 """
516 if self.in_merge:
516 if self.in_merge:
517 msg = b'update_file_reference should not be called when merging'
517 msg = b'update_file_reference should not be called when merging'
518 raise error.ProgrammingError(msg)
518 raise error.ProgrammingError(msg)
519 entry = self._map.get(filename)
519 entry = self._map.get(filename)
520 if entry is None:
520 if entry is None:
521 wc_tracked = False
521 wc_tracked = False
522 else:
522 else:
523 wc_tracked = entry.tracked
523 wc_tracked = entry.tracked
524 if p1_tracked and wc_tracked:
524 if p1_tracked and wc_tracked:
525 # the underlying reference might have changed, we will have to
525 # the underlying reference might have changed, we will have to
526 # check it.
526 # check it.
527 self.normallookup(filename)
527 self.normallookup(filename)
528 elif not (p1_tracked or wc_tracked):
528 elif not (p1_tracked or wc_tracked):
529 # the file is no longer relevant to anyone
529 # the file is no longer relevant to anyone
530 self._drop(filename)
530 self._drop(filename)
531 elif (not p1_tracked) and wc_tracked:
531 elif (not p1_tracked) and wc_tracked:
532 if not entry.added:
532 if not entry.added:
533 self._add(filename)
533 self._add(filename)
534 elif p1_tracked and not wc_tracked:
534 elif p1_tracked and not wc_tracked:
535 if entry is None or not entry.removed:
535 if entry is None or not entry.removed:
536 self._remove(filename)
536 self._remove(filename)
537 else:
537 else:
538 assert False, 'unreachable'
538 assert False, 'unreachable'
539
539
540 @requires_parents_change
540 @requires_parents_change
541 def update_file(
541 def update_file(
542 self,
542 self,
543 filename,
543 filename,
544 wc_tracked,
544 wc_tracked,
545 p1_tracked,
545 p1_tracked,
546 p2_tracked=False,
546 p2_tracked=False,
547 merged=False,
547 merged=False,
548 clean_p1=False,
548 clean_p1=False,
549 clean_p2=False,
549 clean_p2=False,
550 possibly_dirty=False,
550 possibly_dirty=False,
551 parentfiledata=None,
551 parentfiledata=None,
552 ):
552 ):
553 """update the information about a file in the dirstate
553 """update the information about a file in the dirstate
554
554
555 This is to be called when the dirstate's parent changes to keep track
555 This is to be called when the dirstate's parent changes to keep track
556 of the file's situation with regard to the working copy and its parent.
556 of the file's situation with regard to the working copy and its parent.
557
557
558 This function must be called within a `dirstate.parentchange` context.
558 This function must be called within a `dirstate.parentchange` context.
559
559
560 note: the API is at an early stage and we might need to adjust it
560 note: the API is at an early stage and we might need to adjust it
561 depending on what information ends up being relevant and useful to
561 depending on what information ends up being relevant and useful to
562 other processing.
562 other processing.
563 """
563 """
564 if merged and (clean_p1 or clean_p2):
564 if merged and (clean_p1 or clean_p2):
565 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
565 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
566 raise error.ProgrammingError(msg)
566 raise error.ProgrammingError(msg)
567 if not (p1_tracked or p2_tracked or wc_tracked):
568 self._drop(filename)
569 elif merged:
570 assert wc_tracked
571 assert self.in_merge # we are never in the "normallookup" case
572 self.otherparent(filename)
573 elif not (p1_tracked or p2_tracked) and wc_tracked:
574 self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
575 self._map.copymap.pop(filename, None)
576 elif (p1_tracked or p2_tracked) and not wc_tracked:
577 self._remove(filename)
578 elif clean_p2 and wc_tracked:
579 assert p2_tracked
580 self.otherparent(filename)
581 elif not p1_tracked and p2_tracked and wc_tracked:
582 self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
583 self._map.copymap.pop(filename, None)
584 elif possibly_dirty:
585 self._addpath(filename, possibly_dirty=possibly_dirty)
586 elif wc_tracked:
587 self.normal(filename, parentfiledata=parentfiledata)
588 # XXX We need something for files that are dirty after an update
589 else:
590 assert False, 'unreachable'
591
567
568 # note: I do not think we need to double-check name clashes here, since
569 # we are in an update/merge case that should already have taken care of
570 # this. The test suite agrees.
571
572 self._dirty = True
573 self._updatedfiles.add(filename)
574
575 need_parent_file_data = (
576 not (possibly_dirty or clean_p2 or merged)
577 and wc_tracked
578 and p1_tracked
579 )
580
581 # this means we are called for files whose data we do not really care
582 # about (e.g. added or removed); however, this should be a minor overhead
583 # compared to the overall update process calling this.
584 if need_parent_file_data:
585 if parentfiledata is None:
586 parentfiledata = self._get_filedata(filename)
587 mtime = parentfiledata[2]
588
589 if mtime > self._lastnormaltime:
590 # Remember the most recent modification timeslot for
591 # status(), to make sure we won't miss future
592 # size-preserving file content modifications that happen
593 # within the same timeslot.
594 self._lastnormaltime = mtime
595
596 self._map.reset_state(
597 filename,
598 wc_tracked,
599 p1_tracked,
600 p2_tracked=p2_tracked,
601 merged=merged,
602 clean_p1=clean_p1,
603 clean_p2=clean_p2,
604 possibly_dirty=possibly_dirty,
605 parentfiledata=parentfiledata,
606 )
607
592 def _addpath(
608 def _addpath(
593 self,
609 self,
594 f,
610 f,
595 mode=0,
611 mode=0,
596 size=None,
612 size=None,
597 mtime=None,
613 mtime=None,
598 added=False,
614 added=False,
599 merged=False,
615 merged=False,
600 from_p2=False,
616 from_p2=False,
601 possibly_dirty=False,
617 possibly_dirty=False,
602 ):
618 ):
603 entry = self._map.get(f)
619 entry = self._map.get(f)
604 if added or entry is not None and entry.removed:
620 if added or entry is not None and entry.removed:
605 scmutil.checkfilename(f)
621 scmutil.checkfilename(f)
606 if self._map.hastrackeddir(f):
622 if self._map.hastrackeddir(f):
607 msg = _(b'directory %r already in dirstate')
623 msg = _(b'directory %r already in dirstate')
608 msg %= pycompat.bytestr(f)
624 msg %= pycompat.bytestr(f)
609 raise error.Abort(msg)
625 raise error.Abort(msg)
610 # shadows
626 # shadows
611 for d in pathutil.finddirs(f):
627 for d in pathutil.finddirs(f):
612 if self._map.hastrackeddir(d):
628 if self._map.hastrackeddir(d):
613 break
629 break
614 entry = self._map.get(d)
630 entry = self._map.get(d)
615 if entry is not None and not entry.removed:
631 if entry is not None and not entry.removed:
616 msg = _(b'file %r in dirstate clashes with %r')
632 msg = _(b'file %r in dirstate clashes with %r')
617 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
633 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
618 raise error.Abort(msg)
634 raise error.Abort(msg)
619 self._dirty = True
635 self._dirty = True
620 self._updatedfiles.add(f)
636 self._updatedfiles.add(f)
621 self._map.addfile(
637 self._map.addfile(
622 f,
638 f,
623 mode=mode,
639 mode=mode,
624 size=size,
640 size=size,
625 mtime=mtime,
641 mtime=mtime,
626 added=added,
642 added=added,
627 merged=merged,
643 merged=merged,
628 from_p2=from_p2,
644 from_p2=from_p2,
629 possibly_dirty=possibly_dirty,
645 possibly_dirty=possibly_dirty,
630 )
646 )
631
647
632 def _get_filedata(self, filename):
648 def _get_filedata(self, filename):
633 """returns"""
649 """returns"""
634 s = os.lstat(self._join(filename))
650 s = os.lstat(self._join(filename))
635 mode = s.st_mode
651 mode = s.st_mode
636 size = s.st_size
652 size = s.st_size
637 mtime = s[stat.ST_MTIME]
653 mtime = s[stat.ST_MTIME]
638 return (mode, size, mtime)
654 return (mode, size, mtime)
639
655
640 def normal(self, f, parentfiledata=None):
656 def normal(self, f, parentfiledata=None):
641 """Mark a file normal and clean.
657 """Mark a file normal and clean.
642
658
643 parentfiledata: (mode, size, mtime) of the clean file
659 parentfiledata: (mode, size, mtime) of the clean file
644
660
645 parentfiledata should be computed from memory (for mode,
661 parentfiledata should be computed from memory (for mode,
646 size), at or as close as possible to the point where we
662 size), at or as close as possible to the point where we
647 determined the file was clean, to limit the risk of the
663 determined the file was clean, to limit the risk of the
648 file having been changed by an external process between the
664 file having been changed by an external process between the
649 moment where the file was determined to be clean and now."""
665 moment where the file was determined to be clean and now."""
650 if parentfiledata:
666 if parentfiledata:
651 (mode, size, mtime) = parentfiledata
667 (mode, size, mtime) = parentfiledata
652 else:
668 else:
653 (mode, size, mtime) = self._get_filedata(f)
669 (mode, size, mtime) = self._get_filedata(f)
654 self._addpath(f, mode=mode, size=size, mtime=mtime)
670 self._addpath(f, mode=mode, size=size, mtime=mtime)
655 self._map.copymap.pop(f, None)
671 self._map.copymap.pop(f, None)
656 if f in self._map.nonnormalset:
672 if f in self._map.nonnormalset:
657 self._map.nonnormalset.remove(f)
673 self._map.nonnormalset.remove(f)
658 if mtime > self._lastnormaltime:
674 if mtime > self._lastnormaltime:
659 # Remember the most recent modification timeslot for status(),
675 # Remember the most recent modification timeslot for status(),
660 # to make sure we won't miss future size-preserving file content
676 # to make sure we won't miss future size-preserving file content
661 # modifications that happen within the same timeslot.
677 # modifications that happen within the same timeslot.
662 self._lastnormaltime = mtime
678 self._lastnormaltime = mtime
663
679
664 def normallookup(self, f):
680 def normallookup(self, f):
665 '''Mark a file normal, but possibly dirty.'''
681 '''Mark a file normal, but possibly dirty.'''
666 if self.in_merge:
682 if self.in_merge:
667 # if there is a merge going on and the file was either
683 # if there is a merge going on and the file was either
668 # "merged" or coming from other parent (-2) before
684 # "merged" or coming from other parent (-2) before
669 # being removed, restore that state.
685 # being removed, restore that state.
670 entry = self._map.get(f)
686 entry = self._map.get(f)
671 if entry is not None:
687 if entry is not None:
672 # XXX this should probably be dealt with at a lower level
688 # XXX this should probably be dealt with at a lower level
673 # (see `merged_removed` and `from_p2_removed`)
689 # (see `merged_removed` and `from_p2_removed`)
674 if entry.merged_removed or entry.from_p2_removed:
690 if entry.merged_removed or entry.from_p2_removed:
675 source = self._map.copymap.get(f)
691 source = self._map.copymap.get(f)
676 if entry.merged_removed:
692 if entry.merged_removed:
677 self.merge(f)
693 self.merge(f)
678 elif entry.from_p2_removed:
694 elif entry.from_p2_removed:
679 self.otherparent(f)
695 self.otherparent(f)
680 if source is not None:
696 if source is not None:
681 self.copy(source, f)
697 self.copy(source, f)
682 return
698 return
683 elif entry.merged or entry.from_p2:
699 elif entry.merged or entry.from_p2:
684 return
700 return
685 self._addpath(f, possibly_dirty=True)
701 self._addpath(f, possibly_dirty=True)
686 self._map.copymap.pop(f, None)
702 self._map.copymap.pop(f, None)
687
703
688 def otherparent(self, f):
704 def otherparent(self, f):
689 '''Mark as coming from the other parent, always dirty.'''
705 '''Mark as coming from the other parent, always dirty.'''
690 if not self.in_merge:
706 if not self.in_merge:
691 msg = _(b"setting %r to other parent only allowed in merges") % f
707 msg = _(b"setting %r to other parent only allowed in merges") % f
692 raise error.Abort(msg)
708 raise error.Abort(msg)
693 entry = self._map.get(f)
709 entry = self._map.get(f)
694 if entry is not None and entry.tracked:
710 if entry is not None and entry.tracked:
695 # merge-like
711 # merge-like
696 self._addpath(f, merged=True)
712 self._addpath(f, merged=True)
697 else:
713 else:
698 # add-like
714 # add-like
699 self._addpath(f, from_p2=True)
715 self._addpath(f, from_p2=True)
700 self._map.copymap.pop(f, None)
716 self._map.copymap.pop(f, None)
701
717
702 def add(self, f):
718 def add(self, f):
703 '''Mark a file added.'''
719 '''Mark a file added.'''
704 if not self.pendingparentchange():
720 if not self.pendingparentchange():
705 util.nouideprecwarn(
721 util.nouideprecwarn(
706 b"do not use `add` outside of update/merge context."
722 b"do not use `add` outside of update/merge context."
707 b" Use `set_tracked`",
723 b" Use `set_tracked`",
708 b'6.0',
724 b'6.0',
709 stacklevel=2,
725 stacklevel=2,
710 )
726 )
711 self._add(f)
727 self._add(f)
712
728
713 def _add(self, filename):
729 def _add(self, filename):
714 """internal function to mark a file as added"""
730 """internal function to mark a file as added"""
715 self._addpath(filename, added=True)
731 self._addpath(filename, added=True)
716 self._map.copymap.pop(filename, None)
732 self._map.copymap.pop(filename, None)
717
733
718 def remove(self, f):
734 def remove(self, f):
719 '''Mark a file removed'''
735 '''Mark a file removed'''
720 if not self.pendingparentchange():
736 if not self.pendingparentchange():
721 util.nouideprecwarn(
737 util.nouideprecwarn(
722 b"do not use `remove` outside of update/merge context."
738 b"do not use `remove` outside of update/merge context."
723 b" Use `set_untracked`",
739 b" Use `set_untracked`",
724 b'6.0',
740 b'6.0',
725 stacklevel=2,
741 stacklevel=2,
726 )
742 )
727 self._remove(f)
743 self._remove(f)
728
744
729 def _remove(self, filename):
745 def _remove(self, filename):
730 """internal function to mark a file removed"""
746 """internal function to mark a file removed"""
731 self._dirty = True
747 self._dirty = True
732 self._updatedfiles.add(filename)
748 self._updatedfiles.add(filename)
733 self._map.removefile(filename, in_merge=self.in_merge)
749 self._map.removefile(filename, in_merge=self.in_merge)
734
750
735 def merge(self, f):
751 def merge(self, f):
736 '''Mark a file merged.'''
752 '''Mark a file merged.'''
737 if not self.in_merge:
753 if not self.in_merge:
738 return self.normallookup(f)
754 return self.normallookup(f)
739 return self.otherparent(f)
755 return self.otherparent(f)
740
756
741 def drop(self, f):
757 def drop(self, f):
742 '''Drop a file from the dirstate'''
758 '''Drop a file from the dirstate'''
743 if not self.pendingparentchange():
759 if not self.pendingparentchange():
744 util.nouideprecwarn(
760 util.nouideprecwarn(
745 b"do not use `drop` outside of update/merge context."
761 b"do not use `drop` outside of update/merge context."
746 b" Use `set_untracked`",
762 b" Use `set_untracked`",
747 b'6.0',
763 b'6.0',
748 stacklevel=2,
764 stacklevel=2,
749 )
765 )
750 self._drop(f)
766 self._drop(f)
751
767
752 def _drop(self, filename):
768 def _drop(self, filename):
753 """internal function to drop a file from the dirstate"""
769 """internal function to drop a file from the dirstate"""
754 if self._map.dropfile(filename):
770 if self._map.dropfile(filename):
755 self._dirty = True
771 self._dirty = True
756 self._updatedfiles.add(filename)
772 self._updatedfiles.add(filename)
757 self._map.copymap.pop(filename, None)
773 self._map.copymap.pop(filename, None)
758
774
759 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
775 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
760 if exists is None:
776 if exists is None:
761 exists = os.path.lexists(os.path.join(self._root, path))
777 exists = os.path.lexists(os.path.join(self._root, path))
762 if not exists:
778 if not exists:
763 # Maybe a path component exists
779 # Maybe a path component exists
764 if not ignoremissing and b'/' in path:
780 if not ignoremissing and b'/' in path:
765 d, f = path.rsplit(b'/', 1)
781 d, f = path.rsplit(b'/', 1)
766 d = self._normalize(d, False, ignoremissing, None)
782 d = self._normalize(d, False, ignoremissing, None)
767 folded = d + b"/" + f
783 folded = d + b"/" + f
768 else:
784 else:
769 # No path components, preserve original case
785 # No path components, preserve original case
770 folded = path
786 folded = path
771 else:
787 else:
772 # recursively normalize leading directory components
788 # recursively normalize leading directory components
773 # against dirstate
789 # against dirstate
774 if b'/' in normed:
790 if b'/' in normed:
775 d, f = normed.rsplit(b'/', 1)
791 d, f = normed.rsplit(b'/', 1)
776 d = self._normalize(d, False, ignoremissing, True)
792 d = self._normalize(d, False, ignoremissing, True)
777 r = self._root + b"/" + d
793 r = self._root + b"/" + d
778 folded = d + b"/" + util.fspath(f, r)
794 folded = d + b"/" + util.fspath(f, r)
779 else:
795 else:
780 folded = util.fspath(normed, self._root)
796 folded = util.fspath(normed, self._root)
781 storemap[normed] = folded
797 storemap[normed] = folded
782
798
783 return folded
799 return folded
784
800
785 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
801 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
786 normed = util.normcase(path)
802 normed = util.normcase(path)
787 folded = self._map.filefoldmap.get(normed, None)
803 folded = self._map.filefoldmap.get(normed, None)
788 if folded is None:
804 if folded is None:
789 if isknown:
805 if isknown:
790 folded = path
806 folded = path
791 else:
807 else:
792 folded = self._discoverpath(
808 folded = self._discoverpath(
793 path, normed, ignoremissing, exists, self._map.filefoldmap
809 path, normed, ignoremissing, exists, self._map.filefoldmap
794 )
810 )
795 return folded
811 return folded
796
812
797 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
813 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
798 normed = util.normcase(path)
814 normed = util.normcase(path)
799 folded = self._map.filefoldmap.get(normed, None)
815 folded = self._map.filefoldmap.get(normed, None)
800 if folded is None:
816 if folded is None:
801 folded = self._map.dirfoldmap.get(normed, None)
817 folded = self._map.dirfoldmap.get(normed, None)
802 if folded is None:
818 if folded is None:
803 if isknown:
819 if isknown:
804 folded = path
820 folded = path
805 else:
821 else:
806 # store discovered result in dirfoldmap so that future
822 # store discovered result in dirfoldmap so that future
807 # normalizefile calls don't start matching directories
823 # normalizefile calls don't start matching directories
808 folded = self._discoverpath(
824 folded = self._discoverpath(
809 path, normed, ignoremissing, exists, self._map.dirfoldmap
825 path, normed, ignoremissing, exists, self._map.dirfoldmap
810 )
826 )
811 return folded
827 return folded
812
828
813 def normalize(self, path, isknown=False, ignoremissing=False):
829 def normalize(self, path, isknown=False, ignoremissing=False):
814 """
830 """
815 normalize the case of a pathname when on a casefolding filesystem
831 normalize the case of a pathname when on a casefolding filesystem
816
832
817 isknown specifies whether the filename came from walking the
833 isknown specifies whether the filename came from walking the
818 disk, to avoid extra filesystem access.
834 disk, to avoid extra filesystem access.
819
835
820 If ignoremissing is True, missing paths are returned
836 If ignoremissing is True, missing paths are returned
821 unchanged. Otherwise, we try harder to normalize possibly
837 unchanged. Otherwise, we try harder to normalize possibly
822 existing path components.
838 existing path components.
823
839
824 The normalized case is determined based on the following precedence:
840 The normalized case is determined based on the following precedence:
825
841
826 - version of name already stored in the dirstate
842 - version of name already stored in the dirstate
827 - version of name stored on disk
843 - version of name stored on disk
828 - version provided via command arguments
844 - version provided via command arguments
829 """
845 """
830
846
831 if self._checkcase:
847 if self._checkcase:
832 return self._normalize(path, isknown, ignoremissing)
848 return self._normalize(path, isknown, ignoremissing)
833 return path
849 return path
834
850
835 def clear(self):
851 def clear(self):
836 self._map.clear()
852 self._map.clear()
837 self._lastnormaltime = 0
853 self._lastnormaltime = 0
838 self._updatedfiles.clear()
854 self._updatedfiles.clear()
839 self._dirty = True
855 self._dirty = True
840
856
841 def rebuild(self, parent, allfiles, changedfiles=None):
857 def rebuild(self, parent, allfiles, changedfiles=None):
842 if changedfiles is None:
858 if changedfiles is None:
843 # Rebuild entire dirstate
859 # Rebuild entire dirstate
844 to_lookup = allfiles
860 to_lookup = allfiles
845 to_drop = []
861 to_drop = []
846 lastnormaltime = self._lastnormaltime
862 lastnormaltime = self._lastnormaltime
847 self.clear()
863 self.clear()
848 self._lastnormaltime = lastnormaltime
864 self._lastnormaltime = lastnormaltime
849 elif len(changedfiles) < 10:
865 elif len(changedfiles) < 10:
850 # Avoid turning allfiles into a set, which can be expensive if it's
866 # Avoid turning allfiles into a set, which can be expensive if it's
851 # large.
867 # large.
852 to_lookup = []
868 to_lookup = []
853 to_drop = []
869 to_drop = []
854 for f in changedfiles:
870 for f in changedfiles:
855 if f in allfiles:
871 if f in allfiles:
856 to_lookup.append(f)
872 to_lookup.append(f)
857 else:
873 else:
858 to_drop.append(f)
874 to_drop.append(f)
859 else:
875 else:
860 changedfilesset = set(changedfiles)
876 changedfilesset = set(changedfiles)
861 to_lookup = changedfilesset & set(allfiles)
877 to_lookup = changedfilesset & set(allfiles)
862 to_drop = changedfilesset - to_lookup
878 to_drop = changedfilesset - to_lookup
863
879
864 if self._origpl is None:
880 if self._origpl is None:
865 self._origpl = self._pl
881 self._origpl = self._pl
866 self._map.setparents(parent, self._nodeconstants.nullid)
882 self._map.setparents(parent, self._nodeconstants.nullid)
867
883
868 for f in to_lookup:
884 for f in to_lookup:
869 self.normallookup(f)
885 self.normallookup(f)
870 for f in to_drop:
886 for f in to_drop:
871 self._drop(f)
887 self._drop(f)
872
888
873 self._dirty = True
889 self._dirty = True
874
890
875 def identity(self):
891 def identity(self):
876 """Return identity of dirstate itself to detect changing in storage
892 """Return identity of dirstate itself to detect changing in storage
877
893
878 If identity of previous dirstate is equal to this, writing
894 If identity of previous dirstate is equal to this, writing
879 changes based on the former dirstate out can keep consistency.
895 changes based on the former dirstate out can keep consistency.
880 """
896 """
881 return self._map.identity
897 return self._map.identity
882
898
883 def write(self, tr):
899 def write(self, tr):
884 if not self._dirty:
900 if not self._dirty:
885 return
901 return
886
902
887 filename = self._filename
903 filename = self._filename
888 if tr:
904 if tr:
889 # 'dirstate.write()' is not only for writing in-memory
905 # 'dirstate.write()' is not only for writing in-memory
890 # changes out, but also for dropping ambiguous timestamp.
906 # changes out, but also for dropping ambiguous timestamp.
891 # delayed writing re-raise "ambiguous timestamp issue".
907 # delayed writing re-raise "ambiguous timestamp issue".
892 # See also the wiki page below for detail:
908 # See also the wiki page below for detail:
893 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
909 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
894
910
895 # emulate dropping timestamp in 'parsers.pack_dirstate'
911 # emulate dropping timestamp in 'parsers.pack_dirstate'
896 now = _getfsnow(self._opener)
912 now = _getfsnow(self._opener)
897 self._map.clearambiguoustimes(self._updatedfiles, now)
913 self._map.clearambiguoustimes(self._updatedfiles, now)
898
914
899 # emulate that all 'dirstate.normal' results are written out
915 # emulate that all 'dirstate.normal' results are written out
900 self._lastnormaltime = 0
916 self._lastnormaltime = 0
901 self._updatedfiles.clear()
917 self._updatedfiles.clear()
902
918
903 # delay writing in-memory changes out
919 # delay writing in-memory changes out
904 tr.addfilegenerator(
920 tr.addfilegenerator(
905 b'dirstate',
921 b'dirstate',
906 (self._filename,),
922 (self._filename,),
907 lambda f: self._writedirstate(tr, f),
923 lambda f: self._writedirstate(tr, f),
908 location=b'plain',
924 location=b'plain',
909 )
925 )
910 return
926 return
911
927
912 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
928 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
913 self._writedirstate(tr, st)
929 self._writedirstate(tr, st)
914
930
915 def addparentchangecallback(self, category, callback):
931 def addparentchangecallback(self, category, callback):
916 """add a callback to be called when the wd parents are changed
932 """add a callback to be called when the wd parents are changed
917
933
918 Callback will be called with the following arguments:
934 Callback will be called with the following arguments:
919 dirstate, (oldp1, oldp2), (newp1, newp2)
935 dirstate, (oldp1, oldp2), (newp1, newp2)
920
936
921 Category is a unique identifier to allow overwriting an old callback
937 Category is a unique identifier to allow overwriting an old callback
922 with a newer callback.
938 with a newer callback.
923 """
939 """
924 self._plchangecallbacks[category] = callback
940 self._plchangecallbacks[category] = callback
925
941
926 def _writedirstate(self, tr, st):
942 def _writedirstate(self, tr, st):
927 # notify callbacks about parents change
943 # notify callbacks about parents change
928 if self._origpl is not None and self._origpl != self._pl:
944 if self._origpl is not None and self._origpl != self._pl:
929 for c, callback in sorted(
945 for c, callback in sorted(
930 pycompat.iteritems(self._plchangecallbacks)
946 pycompat.iteritems(self._plchangecallbacks)
931 ):
947 ):
932 callback(self, self._origpl, self._pl)
948 callback(self, self._origpl, self._pl)
933 self._origpl = None
949 self._origpl = None
934 # use the modification time of the newly created temporary file as the
950 # use the modification time of the newly created temporary file as the
935 # filesystem's notion of 'now'
951 # filesystem's notion of 'now'
936 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
952 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
937
953
938 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
954 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
939 # timestamp of each entry in dirstate, because of 'now > mtime'
955 # timestamp of each entry in dirstate, because of 'now > mtime'
940 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
956 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
941 if delaywrite > 0:
957 if delaywrite > 0:
942 # do we have any files to delay for?
958 # do we have any files to delay for?
943 for f, e in pycompat.iteritems(self._map):
959 for f, e in pycompat.iteritems(self._map):
944 if e.need_delay(now):
960 if e.need_delay(now):
945 import time # to avoid useless import
961 import time # to avoid useless import
946
962
947 # rather than sleep n seconds, sleep until the next
963 # rather than sleep n seconds, sleep until the next
948 # multiple of n seconds
964 # multiple of n seconds
949 clock = time.time()
965 clock = time.time()
950 start = int(clock) - (int(clock) % delaywrite)
966 start = int(clock) - (int(clock) % delaywrite)
951 end = start + delaywrite
967 end = start + delaywrite
952 time.sleep(end - clock)
968 time.sleep(end - clock)
953 now = end # trust our estimate that the end is near now
969 now = end # trust our estimate that the end is near now
954 break
970 break
955
971
956 self._map.write(tr, st, now)
972 self._map.write(tr, st, now)
957 self._lastnormaltime = 0
973 self._lastnormaltime = 0
958 self._dirty = False
974 self._dirty = False
959
975
960 def _dirignore(self, f):
976 def _dirignore(self, f):
961 if self._ignore(f):
977 if self._ignore(f):
962 return True
978 return True
963 for p in pathutil.finddirs(f):
979 for p in pathutil.finddirs(f):
964 if self._ignore(p):
980 if self._ignore(p):
965 return True
981 return True
966 return False
982 return False
967
983
968 def _ignorefiles(self):
984 def _ignorefiles(self):
969 files = []
985 files = []
970 if os.path.exists(self._join(b'.hgignore')):
986 if os.path.exists(self._join(b'.hgignore')):
971 files.append(self._join(b'.hgignore'))
987 files.append(self._join(b'.hgignore'))
972 for name, path in self._ui.configitems(b"ui"):
988 for name, path in self._ui.configitems(b"ui"):
973 if name == b'ignore' or name.startswith(b'ignore.'):
989 if name == b'ignore' or name.startswith(b'ignore.'):
974 # we need to use os.path.join here rather than self._join
990 # we need to use os.path.join here rather than self._join
975 # because path is arbitrary and user-specified
991 # because path is arbitrary and user-specified
976 files.append(os.path.join(self._rootdir, util.expandpath(path)))
992 files.append(os.path.join(self._rootdir, util.expandpath(path)))
977 return files
993 return files
978
994
979 def _ignorefileandline(self, f):
995 def _ignorefileandline(self, f):
980 files = collections.deque(self._ignorefiles())
996 files = collections.deque(self._ignorefiles())
981 visited = set()
997 visited = set()
982 while files:
998 while files:
983 i = files.popleft()
999 i = files.popleft()
984 patterns = matchmod.readpatternfile(
1000 patterns = matchmod.readpatternfile(
985 i, self._ui.warn, sourceinfo=True
1001 i, self._ui.warn, sourceinfo=True
986 )
1002 )
987 for pattern, lineno, line in patterns:
1003 for pattern, lineno, line in patterns:
988 kind, p = matchmod._patsplit(pattern, b'glob')
1004 kind, p = matchmod._patsplit(pattern, b'glob')
989 if kind == b"subinclude":
1005 if kind == b"subinclude":
990 if p not in visited:
1006 if p not in visited:
991 files.append(p)
1007 files.append(p)
992 continue
1008 continue
993 m = matchmod.match(
1009 m = matchmod.match(
994 self._root, b'', [], [pattern], warn=self._ui.warn
1010 self._root, b'', [], [pattern], warn=self._ui.warn
995 )
1011 )
996 if m(f):
1012 if m(f):
997 return (i, lineno, line)
1013 return (i, lineno, line)
998 visited.add(i)
1014 visited.add(i)
999 return (None, -1, b"")
1015 return (None, -1, b"")
1000
1016
1001 def _walkexplicit(self, match, subrepos):
1017 def _walkexplicit(self, match, subrepos):
1002 """Get stat data about the files explicitly specified by match.
1018 """Get stat data about the files explicitly specified by match.
1003
1019
1004 Return a triple (results, dirsfound, dirsnotfound).
1020 Return a triple (results, dirsfound, dirsnotfound).
1005 - results is a mapping from filename to stat result. It also contains
1021 - results is a mapping from filename to stat result. It also contains
1006 listings mapping subrepos and .hg to None.
1022 listings mapping subrepos and .hg to None.
1007 - dirsfound is a list of files found to be directories.
1023 - dirsfound is a list of files found to be directories.
1008 - dirsnotfound is a list of files that the dirstate thinks are
1024 - dirsnotfound is a list of files that the dirstate thinks are
1009 directories and that were not found."""
1025 directories and that were not found."""
1010
1026
1011 def badtype(mode):
1027 def badtype(mode):
1012 kind = _(b'unknown')
1028 kind = _(b'unknown')
1013 if stat.S_ISCHR(mode):
1029 if stat.S_ISCHR(mode):
1014 kind = _(b'character device')
1030 kind = _(b'character device')
1015 elif stat.S_ISBLK(mode):
1031 elif stat.S_ISBLK(mode):
1016 kind = _(b'block device')
1032 kind = _(b'block device')
1017 elif stat.S_ISFIFO(mode):
1033 elif stat.S_ISFIFO(mode):
1018 kind = _(b'fifo')
1034 kind = _(b'fifo')
1019 elif stat.S_ISSOCK(mode):
1035 elif stat.S_ISSOCK(mode):
1020 kind = _(b'socket')
1036 kind = _(b'socket')
1021 elif stat.S_ISDIR(mode):
1037 elif stat.S_ISDIR(mode):
1022 kind = _(b'directory')
1038 kind = _(b'directory')
1023 return _(b'unsupported file type (type is %s)') % kind
1039 return _(b'unsupported file type (type is %s)') % kind
1024
1040
1025 badfn = match.bad
1041 badfn = match.bad
1026 dmap = self._map
1042 dmap = self._map
1027 lstat = os.lstat
1043 lstat = os.lstat
1028 getkind = stat.S_IFMT
1044 getkind = stat.S_IFMT
1029 dirkind = stat.S_IFDIR
1045 dirkind = stat.S_IFDIR
1030 regkind = stat.S_IFREG
1046 regkind = stat.S_IFREG
1031 lnkkind = stat.S_IFLNK
1047 lnkkind = stat.S_IFLNK
1032 join = self._join
1048 join = self._join
1033 dirsfound = []
1049 dirsfound = []
1034 foundadd = dirsfound.append
1050 foundadd = dirsfound.append
1035 dirsnotfound = []
1051 dirsnotfound = []
1036 notfoundadd = dirsnotfound.append
1052 notfoundadd = dirsnotfound.append
1037
1053
1038 if not match.isexact() and self._checkcase:
1054 if not match.isexact() and self._checkcase:
1039 normalize = self._normalize
1055 normalize = self._normalize
1040 else:
1056 else:
1041 normalize = None
1057 normalize = None
1042
1058
1043 files = sorted(match.files())
1059 files = sorted(match.files())
1044 subrepos.sort()
1060 subrepos.sort()
1045 i, j = 0, 0
1061 i, j = 0, 0
1046 while i < len(files) and j < len(subrepos):
1062 while i < len(files) and j < len(subrepos):
1047 subpath = subrepos[j] + b"/"
1063 subpath = subrepos[j] + b"/"
1048 if files[i] < subpath:
1064 if files[i] < subpath:
1049 i += 1
1065 i += 1
1050 continue
1066 continue
1051 while i < len(files) and files[i].startswith(subpath):
1067 while i < len(files) and files[i].startswith(subpath):
1052 del files[i]
1068 del files[i]
1053 j += 1
1069 j += 1
1054
1070
1055 if not files or b'' in files:
1071 if not files or b'' in files:
1056 files = [b'']
1072 files = [b'']
1057 # constructing the foldmap is expensive, so don't do it for the
1073 # constructing the foldmap is expensive, so don't do it for the
1058 # common case where files is ['']
1074 # common case where files is ['']
1059 normalize = None
1075 normalize = None
1060 results = dict.fromkeys(subrepos)
1076 results = dict.fromkeys(subrepos)
1061 results[b'.hg'] = None
1077 results[b'.hg'] = None
1062
1078
1063 for ff in files:
1079 for ff in files:
1064 if normalize:
1080 if normalize:
1065 nf = normalize(ff, False, True)
1081 nf = normalize(ff, False, True)
1066 else:
1082 else:
1067 nf = ff
1083 nf = ff
1068 if nf in results:
1084 if nf in results:
1069 continue
1085 continue
1070
1086
1071 try:
1087 try:
1072 st = lstat(join(nf))
1088 st = lstat(join(nf))
1073 kind = getkind(st.st_mode)
1089 kind = getkind(st.st_mode)
1074 if kind == dirkind:
1090 if kind == dirkind:
1075 if nf in dmap:
1091 if nf in dmap:
1076 # file replaced by dir on disk but still in dirstate
1092 # file replaced by dir on disk but still in dirstate
1077 results[nf] = None
1093 results[nf] = None
1078 foundadd((nf, ff))
1094 foundadd((nf, ff))
1079 elif kind == regkind or kind == lnkkind:
1095 elif kind == regkind or kind == lnkkind:
1080 results[nf] = st
1096 results[nf] = st
1081 else:
1097 else:
1082 badfn(ff, badtype(kind))
1098 badfn(ff, badtype(kind))
1083 if nf in dmap:
1099 if nf in dmap:
1084 results[nf] = None
1100 results[nf] = None
1085 except OSError as inst: # nf not found on disk - it is dirstate only
1101 except OSError as inst: # nf not found on disk - it is dirstate only
1086 if nf in dmap: # does it exactly match a missing file?
1102 if nf in dmap: # does it exactly match a missing file?
1087 results[nf] = None
1103 results[nf] = None
1088 else: # does it match a missing directory?
1104 else: # does it match a missing directory?
1089 if self._map.hasdir(nf):
1105 if self._map.hasdir(nf):
1090 notfoundadd(nf)
1106 notfoundadd(nf)
1091 else:
1107 else:
1092 badfn(ff, encoding.strtolocal(inst.strerror))
1108 badfn(ff, encoding.strtolocal(inst.strerror))
1093
1109
1094 # match.files() may contain explicitly-specified paths that shouldn't
1110 # match.files() may contain explicitly-specified paths that shouldn't
1095 # be taken; drop them from the list of files found. dirsfound/notfound
1111 # be taken; drop them from the list of files found. dirsfound/notfound
1096 # aren't filtered here because they will be tested later.
1112 # aren't filtered here because they will be tested later.
1097 if match.anypats():
1113 if match.anypats():
1098 for f in list(results):
1114 for f in list(results):
1099 if f == b'.hg' or f in subrepos:
1115 if f == b'.hg' or f in subrepos:
1100 # keep sentinel to disable further out-of-repo walks
1116 # keep sentinel to disable further out-of-repo walks
1101 continue
1117 continue
1102 if not match(f):
1118 if not match(f):
1103 del results[f]
1119 del results[f]
1104
1120
1105 # Case insensitive filesystems cannot rely on lstat() failing to detect
1121 # Case insensitive filesystems cannot rely on lstat() failing to detect
1106 # a case-only rename. Prune the stat object for any file that does not
1122 # a case-only rename. Prune the stat object for any file that does not
1107 # match the case in the filesystem, if there are multiple files that
1123 # match the case in the filesystem, if there are multiple files that
1108 # normalize to the same path.
1124 # normalize to the same path.
1109 if match.isexact() and self._checkcase:
1125 if match.isexact() and self._checkcase:
1110 normed = {}
1126 normed = {}
1111
1127
1112 for f, st in pycompat.iteritems(results):
1128 for f, st in pycompat.iteritems(results):
1113 if st is None:
1129 if st is None:
1114 continue
1130 continue
1115
1131
1116 nc = util.normcase(f)
1132 nc = util.normcase(f)
1117 paths = normed.get(nc)
1133 paths = normed.get(nc)
1118
1134
1119 if paths is None:
1135 if paths is None:
1120 paths = set()
1136 paths = set()
1121 normed[nc] = paths
1137 normed[nc] = paths
1122
1138
1123 paths.add(f)
1139 paths.add(f)
1124
1140
1125 for norm, paths in pycompat.iteritems(normed):
1141 for norm, paths in pycompat.iteritems(normed):
1126 if len(paths) > 1:
1142 if len(paths) > 1:
1127 for path in paths:
1143 for path in paths:
1128 folded = self._discoverpath(
1144 folded = self._discoverpath(
1129 path, norm, True, None, self._map.dirfoldmap
1145 path, norm, True, None, self._map.dirfoldmap
1130 )
1146 )
1131 if path != folded:
1147 if path != folded:
1132 results[path] = None
1148 results[path] = None
1133
1149
1134 return results, dirsfound, dirsnotfound
1150 return results, dirsfound, dirsnotfound
1135
1151
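
The tail of `_walkexplicit` handles case-insensitive filesystems: when several requested paths fold to the same name, it keeps stat data only for the spelling that actually exists on disk, so a case-only rename is not reported under both names. A reduced sketch of that grouping, using `str.lower` in place of `util.normcase` and a caller-supplied `on_disk_case` callback instead of `_discoverpath`:

def prune_case_collisions(results, on_disk_case):
    """Drop stat info for paths that fold to the same name but do not match
    the case present on disk."""
    normed = {}
    for path, st in results.items():
        if st is None:
            continue
        normed.setdefault(path.lower(), set()).add(path)
    for group in normed.values():
        if len(group) > 1:
            for path in group:
                if path != on_disk_case(path):
                    results[path] = None
    return results

# With both 'README' and 'readme' requested but only 'README' on disk,
# prune_case_collisions(results, lambda p: 'README') keeps the stat result
# for 'README' and replaces the one for 'readme' with None.
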
1136 def walk(self, match, subrepos, unknown, ignored, full=True):
1152 def walk(self, match, subrepos, unknown, ignored, full=True):
1137 """
1153 """
1138 Walk recursively through the directory tree, finding all files
1154 Walk recursively through the directory tree, finding all files
1139 matched by match.
1155 matched by match.
1140
1156
1141 If full is False, maybe skip some known-clean files.
1157 If full is False, maybe skip some known-clean files.
1142
1158
1143 Return a dict mapping filename to stat-like object (either
1159 Return a dict mapping filename to stat-like object (either
1144 mercurial.osutil.stat instance or return value of os.stat()).
1160 mercurial.osutil.stat instance or return value of os.stat()).
1145
1161
1146 """
1162 """
1147 # full is a flag that extensions that hook into walk can use -- this
1163 # full is a flag that extensions that hook into walk can use -- this
1148 # implementation doesn't use it at all. This satisfies the contract
1164 # implementation doesn't use it at all. This satisfies the contract
1149 # because we only guarantee a "maybe".
1165 # because we only guarantee a "maybe".
1150
1166
1151 if ignored:
1167 if ignored:
1152 ignore = util.never
1168 ignore = util.never
1153 dirignore = util.never
1169 dirignore = util.never
1154 elif unknown:
1170 elif unknown:
1155 ignore = self._ignore
1171 ignore = self._ignore
1156 dirignore = self._dirignore
1172 dirignore = self._dirignore
1157 else:
1173 else:
1158 # if not unknown and not ignored, drop dir recursion and step 2
1174 # if not unknown and not ignored, drop dir recursion and step 2
1159 ignore = util.always
1175 ignore = util.always
1160 dirignore = util.always
1176 dirignore = util.always
1161
1177
1162 matchfn = match.matchfn
1178 matchfn = match.matchfn
1163 matchalways = match.always()
1179 matchalways = match.always()
1164 matchtdir = match.traversedir
1180 matchtdir = match.traversedir
1165 dmap = self._map
1181 dmap = self._map
1166 listdir = util.listdir
1182 listdir = util.listdir
1167 lstat = os.lstat
1183 lstat = os.lstat
1168 dirkind = stat.S_IFDIR
1184 dirkind = stat.S_IFDIR
1169 regkind = stat.S_IFREG
1185 regkind = stat.S_IFREG
1170 lnkkind = stat.S_IFLNK
1186 lnkkind = stat.S_IFLNK
1171 join = self._join
1187 join = self._join
1172
1188
1173 exact = skipstep3 = False
1189 exact = skipstep3 = False
1174 if match.isexact(): # match.exact
1190 if match.isexact(): # match.exact
1175 exact = True
1191 exact = True
1176 dirignore = util.always # skip step 2
1192 dirignore = util.always # skip step 2
1177 elif match.prefix(): # match.match, no patterns
1193 elif match.prefix(): # match.match, no patterns
1178 skipstep3 = True
1194 skipstep3 = True
1179
1195
1180 if not exact and self._checkcase:
1196 if not exact and self._checkcase:
1181 normalize = self._normalize
1197 normalize = self._normalize
1182 normalizefile = self._normalizefile
1198 normalizefile = self._normalizefile
1183 skipstep3 = False
1199 skipstep3 = False
1184 else:
1200 else:
1185 normalize = self._normalize
1201 normalize = self._normalize
1186 normalizefile = None
1202 normalizefile = None
1187
1203
1188 # step 1: find all explicit files
1204 # step 1: find all explicit files
1189 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1205 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1190 if matchtdir:
1206 if matchtdir:
1191 for d in work:
1207 for d in work:
1192 matchtdir(d[0])
1208 matchtdir(d[0])
1193 for d in dirsnotfound:
1209 for d in dirsnotfound:
1194 matchtdir(d)
1210 matchtdir(d)
1195
1211
1196 skipstep3 = skipstep3 and not (work or dirsnotfound)
1212 skipstep3 = skipstep3 and not (work or dirsnotfound)
1197 work = [d for d in work if not dirignore(d[0])]
1213 work = [d for d in work if not dirignore(d[0])]
1198
1214
1199 # step 2: visit subdirectories
1215 # step 2: visit subdirectories
1200 def traverse(work, alreadynormed):
1216 def traverse(work, alreadynormed):
1201 wadd = work.append
1217 wadd = work.append
1202 while work:
1218 while work:
1203 tracing.counter('dirstate.walk work', len(work))
1219 tracing.counter('dirstate.walk work', len(work))
1204 nd = work.pop()
1220 nd = work.pop()
1205 visitentries = match.visitchildrenset(nd)
1221 visitentries = match.visitchildrenset(nd)
1206 if not visitentries:
1222 if not visitentries:
1207 continue
1223 continue
1208 if visitentries == b'this' or visitentries == b'all':
1224 if visitentries == b'this' or visitentries == b'all':
1209 visitentries = None
1225 visitentries = None
1210 skip = None
1226 skip = None
1211 if nd != b'':
1227 if nd != b'':
1212 skip = b'.hg'
1228 skip = b'.hg'
1213 try:
1229 try:
1214 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1230 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1215 entries = listdir(join(nd), stat=True, skip=skip)
1231 entries = listdir(join(nd), stat=True, skip=skip)
1216 except OSError as inst:
1232 except OSError as inst:
1217 if inst.errno in (errno.EACCES, errno.ENOENT):
1233 if inst.errno in (errno.EACCES, errno.ENOENT):
1218 match.bad(
1234 match.bad(
1219 self.pathto(nd), encoding.strtolocal(inst.strerror)
1235 self.pathto(nd), encoding.strtolocal(inst.strerror)
1220 )
1236 )
1221 continue
1237 continue
1222 raise
1238 raise
1223 for f, kind, st in entries:
1239 for f, kind, st in entries:
1224 # Some matchers may return files in the visitentries set,
1240 # Some matchers may return files in the visitentries set,
1225 # instead of 'this', if the matcher explicitly mentions them
1241 # instead of 'this', if the matcher explicitly mentions them
1226 # and is not an exactmatcher. This is acceptable; we do not
1242 # and is not an exactmatcher. This is acceptable; we do not
1227 # make any hard assumptions about file-or-directory below
1243 # make any hard assumptions about file-or-directory below
1228 # based on the presence of `f` in visitentries. If
1244 # based on the presence of `f` in visitentries. If
1229 # visitchildrenset returned a set, we can always skip the
1245 # visitchildrenset returned a set, we can always skip the
1230 # entries *not* in the set it provided regardless of whether
1246 # entries *not* in the set it provided regardless of whether
1231 # they're actually a file or a directory.
1247 # they're actually a file or a directory.
1232 if visitentries and f not in visitentries:
1248 if visitentries and f not in visitentries:
1233 continue
1249 continue
1234 if normalizefile:
1250 if normalizefile:
1235 # even though f might be a directory, we're only
1251 # even though f might be a directory, we're only
1236 # interested in comparing it to files currently in the
1252 # interested in comparing it to files currently in the
1237 # dmap -- therefore normalizefile is enough
1253 # dmap -- therefore normalizefile is enough
1238 nf = normalizefile(
1254 nf = normalizefile(
1239 nd and (nd + b"/" + f) or f, True, True
1255 nd and (nd + b"/" + f) or f, True, True
1240 )
1256 )
1241 else:
1257 else:
1242 nf = nd and (nd + b"/" + f) or f
1258 nf = nd and (nd + b"/" + f) or f
1243 if nf not in results:
1259 if nf not in results:
1244 if kind == dirkind:
1260 if kind == dirkind:
1245 if not ignore(nf):
1261 if not ignore(nf):
1246 if matchtdir:
1262 if matchtdir:
1247 matchtdir(nf)
1263 matchtdir(nf)
1248 wadd(nf)
1264 wadd(nf)
1249 if nf in dmap and (matchalways or matchfn(nf)):
1265 if nf in dmap and (matchalways or matchfn(nf)):
1250 results[nf] = None
1266 results[nf] = None
1251 elif kind == regkind or kind == lnkkind:
1267 elif kind == regkind or kind == lnkkind:
1252 if nf in dmap:
1268 if nf in dmap:
1253 if matchalways or matchfn(nf):
1269 if matchalways or matchfn(nf):
1254 results[nf] = st
1270 results[nf] = st
1255 elif (matchalways or matchfn(nf)) and not ignore(
1271 elif (matchalways or matchfn(nf)) and not ignore(
1256 nf
1272 nf
1257 ):
1273 ):
1258 # unknown file -- normalize if necessary
1274 # unknown file -- normalize if necessary
1259 if not alreadynormed:
1275 if not alreadynormed:
1260 nf = normalize(nf, False, True)
1276 nf = normalize(nf, False, True)
1261 results[nf] = st
1277 results[nf] = st
1262 elif nf in dmap and (matchalways or matchfn(nf)):
1278 elif nf in dmap and (matchalways or matchfn(nf)):
1263 results[nf] = None
1279 results[nf] = None
1264
1280
1265 for nd, d in work:
1281 for nd, d in work:
1266 # alreadynormed means that processwork doesn't have to do any
1282 # alreadynormed means that processwork doesn't have to do any
1267 # expensive directory normalization
1283 # expensive directory normalization
1268 alreadynormed = not normalize or nd == d
1284 alreadynormed = not normalize or nd == d
1269 traverse([d], alreadynormed)
1285 traverse([d], alreadynormed)
1270
1286
1271 for s in subrepos:
1287 for s in subrepos:
1272 del results[s]
1288 del results[s]
1273 del results[b'.hg']
1289 del results[b'.hg']
1274
1290
1275 # step 3: visit remaining files from dmap
1291 # step 3: visit remaining files from dmap
1276 if not skipstep3 and not exact:
1292 if not skipstep3 and not exact:
1277 # If a dmap file is not in results yet, it was either
1293 # If a dmap file is not in results yet, it was either
1278 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1294 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1279 # symlink directory.
1295 # symlink directory.
1280 if not results and matchalways:
1296 if not results and matchalways:
1281 visit = [f for f in dmap]
1297 visit = [f for f in dmap]
1282 else:
1298 else:
1283 visit = [f for f in dmap if f not in results and matchfn(f)]
1299 visit = [f for f in dmap if f not in results and matchfn(f)]
1284 visit.sort()
1300 visit.sort()
1285
1301
1286 if unknown:
1302 if unknown:
1287 # unknown == True means we walked all dirs under the roots
1303 # unknown == True means we walked all dirs under the roots
1288 # that weren't ignored, and everything that matched was stat'ed
1304 # that weren't ignored, and everything that matched was stat'ed
1289 # and is already in results.
1305 # and is already in results.
1290 # The rest must thus be ignored or under a symlink.
1306 # The rest must thus be ignored or under a symlink.
1291 audit_path = pathutil.pathauditor(self._root, cached=True)
1307 audit_path = pathutil.pathauditor(self._root, cached=True)
1292
1308
1293 for nf in iter(visit):
1309 for nf in iter(visit):
1294 # If a stat for the same file was already added with a
1310 # If a stat for the same file was already added with a
1295 # different case, don't add one for this, since that would
1311 # different case, don't add one for this, since that would
1296 # make it appear as if the file exists under both names
1312 # make it appear as if the file exists under both names
1297 # on disk.
1313 # on disk.
1298 if (
1314 if (
1299 normalizefile
1315 normalizefile
1300 and normalizefile(nf, True, True) in results
1316 and normalizefile(nf, True, True) in results
1301 ):
1317 ):
1302 results[nf] = None
1318 results[nf] = None
1303 # Report ignored items in the dmap as long as they are not
1319 # Report ignored items in the dmap as long as they are not
1304 # under a symlink directory.
1320 # under a symlink directory.
1305 elif audit_path.check(nf):
1321 elif audit_path.check(nf):
1306 try:
1322 try:
1307 results[nf] = lstat(join(nf))
1323 results[nf] = lstat(join(nf))
1308 # file was just ignored, no links, and exists
1324 # file was just ignored, no links, and exists
1309 except OSError:
1325 except OSError:
1310 # file doesn't exist
1326 # file doesn't exist
1311 results[nf] = None
1327 results[nf] = None
1312 else:
1328 else:
1313 # It's either missing or under a symlink directory
1329 # It's either missing or under a symlink directory
1314 # which we in this case report as missing
1330 # which we in this case report as missing
1315 results[nf] = None
1331 results[nf] = None
1316 else:
1332 else:
1317 # We may not have walked the full directory tree above,
1333 # We may not have walked the full directory tree above,
1318 # so stat and check everything we missed.
1334 # so stat and check everything we missed.
1319 iv = iter(visit)
1335 iv = iter(visit)
1320 for st in util.statfiles([join(i) for i in visit]):
1336 for st in util.statfiles([join(i) for i in visit]):
1321 results[next(iv)] = st
1337 results[next(iv)] = st
1322 return results
1338 return results
1323
1339
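
Step 3's fallback branch stats every remaining dirstate file in one batch and zips the results back onto the sorted visit list. A tiny sketch of that batched lookup built on `os.lstat`; the real code goes through `util.statfiles`, which does the same error swallowing in C:

import os

def statfiles(paths):
    """Return one stat result per path, with None for paths that cannot be stat'ed."""
    out = []
    for p in paths:
        try:
            out.append(os.lstat(p))
        except OSError:
            out.append(None)
    return out

# Pairing results back up with the visit list, as walk() does:
#   visit = sorted(remaining)
#   for name, st in zip(visit, statfiles(visit)):
#       results[name] = st
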
1324 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1340 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1325 # Force Rayon (Rust parallelism library) to respect the number of
1341 # Force Rayon (Rust parallelism library) to respect the number of
1326 # workers. This is a temporary workaround until Rust code knows
1342 # workers. This is a temporary workaround until Rust code knows
1327 # how to read the config file.
1343 # how to read the config file.
1328 numcpus = self._ui.configint(b"worker", b"numcpus")
1344 numcpus = self._ui.configint(b"worker", b"numcpus")
1329 if numcpus is not None:
1345 if numcpus is not None:
1330 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1346 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1331
1347
1332 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1348 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1333 if not workers_enabled:
1349 if not workers_enabled:
1334 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1350 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1335
1351
1336 (
1352 (
1337 lookup,
1353 lookup,
1338 modified,
1354 modified,
1339 added,
1355 added,
1340 removed,
1356 removed,
1341 deleted,
1357 deleted,
1342 clean,
1358 clean,
1343 ignored,
1359 ignored,
1344 unknown,
1360 unknown,
1345 warnings,
1361 warnings,
1346 bad,
1362 bad,
1347 traversed,
1363 traversed,
1348 dirty,
1364 dirty,
1349 ) = rustmod.status(
1365 ) = rustmod.status(
1350 self._map._rustmap,
1366 self._map._rustmap,
1351 matcher,
1367 matcher,
1352 self._rootdir,
1368 self._rootdir,
1353 self._ignorefiles(),
1369 self._ignorefiles(),
1354 self._checkexec,
1370 self._checkexec,
1355 self._lastnormaltime,
1371 self._lastnormaltime,
1356 bool(list_clean),
1372 bool(list_clean),
1357 bool(list_ignored),
1373 bool(list_ignored),
1358 bool(list_unknown),
1374 bool(list_unknown),
1359 bool(matcher.traversedir),
1375 bool(matcher.traversedir),
1360 )
1376 )
1361
1377
1362 self._dirty |= dirty
1378 self._dirty |= dirty
1363
1379
1364 if matcher.traversedir:
1380 if matcher.traversedir:
1365 for dir in traversed:
1381 for dir in traversed:
1366 matcher.traversedir(dir)
1382 matcher.traversedir(dir)
1367
1383
1368 if self._ui.warn:
1384 if self._ui.warn:
1369 for item in warnings:
1385 for item in warnings:
1370 if isinstance(item, tuple):
1386 if isinstance(item, tuple):
1371 file_path, syntax = item
1387 file_path, syntax = item
1372 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1388 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1373 file_path,
1389 file_path,
1374 syntax,
1390 syntax,
1375 )
1391 )
1376 self._ui.warn(msg)
1392 self._ui.warn(msg)
1377 else:
1393 else:
1378 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1394 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1379 self._ui.warn(
1395 self._ui.warn(
1380 msg
1396 msg
1381 % (
1397 % (
1382 pathutil.canonpath(
1398 pathutil.canonpath(
1383 self._rootdir, self._rootdir, item
1399 self._rootdir, self._rootdir, item
1384 ),
1400 ),
1385 b"No such file or directory",
1401 b"No such file or directory",
1386 )
1402 )
1387 )
1403 )
1388
1404
1389 for (fn, message) in bad:
1405 for (fn, message) in bad:
1390 matcher.bad(fn, encoding.strtolocal(message))
1406 matcher.bad(fn, encoding.strtolocal(message))
1391
1407
1392 status = scmutil.status(
1408 status = scmutil.status(
1393 modified=modified,
1409 modified=modified,
1394 added=added,
1410 added=added,
1395 removed=removed,
1411 removed=removed,
1396 deleted=deleted,
1412 deleted=deleted,
1397 unknown=unknown,
1413 unknown=unknown,
1398 ignored=ignored,
1414 ignored=ignored,
1399 clean=clean,
1415 clean=clean,
1400 )
1416 )
1401 return (lookup, status)
1417 return (lookup, status)
1402
1418
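
The Rust status path is throttled through environment variables because the extension does not read the hg configuration yet: `worker.numcpus` becomes a default for `RAYON_NUM_THREADS`, and disabling workers pins it to a single thread. A minimal sketch of that translation using plain `os.environ` with text strings (the code above uses `encoding.environ` and byte strings):

import os

def configure_rayon(numcpus, workers_enabled):
    """Mirror the worker settings into Rayon's environment variables."""
    if numcpus is not None:
        # only a default: an explicit RAYON_NUM_THREADS in the environment wins
        os.environ.setdefault('RAYON_NUM_THREADS', '%d' % numcpus)
    if not workers_enabled:
        os.environ['RAYON_NUM_THREADS'] = '1'

# configure_rayon(4, True) exports RAYON_NUM_THREADS=4 unless it is already set;
# configure_rayon(4, False) forces single-threaded traversal.
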
1403 def status(self, match, subrepos, ignored, clean, unknown):
1419 def status(self, match, subrepos, ignored, clean, unknown):
1404 """Determine the status of the working copy relative to the
1420 """Determine the status of the working copy relative to the
1405 dirstate and return a pair of (unsure, status), where status is of type
1421 dirstate and return a pair of (unsure, status), where status is of type
1406 scmutil.status and:
1422 scmutil.status and:
1407
1423
1408 unsure:
1424 unsure:
1409 files that might have been modified since the dirstate was
1425 files that might have been modified since the dirstate was
1410 written, but need to be read to be sure (size is the same
1426 written, but need to be read to be sure (size is the same
1411 but mtime differs)
1427 but mtime differs)
1412 status.modified:
1428 status.modified:
1413 files that have definitely been modified since the dirstate
1429 files that have definitely been modified since the dirstate
1414 was written (different size or mode)
1430 was written (different size or mode)
1415 status.clean:
1431 status.clean:
1416 files that have definitely not been modified since the
1432 files that have definitely not been modified since the
1417 dirstate was written
1433 dirstate was written
1418 """
1434 """
1419 listignored, listclean, listunknown = ignored, clean, unknown
1435 listignored, listclean, listunknown = ignored, clean, unknown
1420 lookup, modified, added, unknown, ignored = [], [], [], [], []
1436 lookup, modified, added, unknown, ignored = [], [], [], [], []
1421 removed, deleted, clean = [], [], []
1437 removed, deleted, clean = [], [], []
1422
1438
1423 dmap = self._map
1439 dmap = self._map
1424 dmap.preload()
1440 dmap.preload()
1425
1441
1426 use_rust = True
1442 use_rust = True
1427
1443
1428 allowed_matchers = (
1444 allowed_matchers = (
1429 matchmod.alwaysmatcher,
1445 matchmod.alwaysmatcher,
1430 matchmod.exactmatcher,
1446 matchmod.exactmatcher,
1431 matchmod.includematcher,
1447 matchmod.includematcher,
1432 )
1448 )
1433
1449
1434 if rustmod is None:
1450 if rustmod is None:
1435 use_rust = False
1451 use_rust = False
1436 elif self._checkcase:
1452 elif self._checkcase:
1437 # Case-insensitive filesystems are not handled yet
1453 # Case-insensitive filesystems are not handled yet
1438 use_rust = False
1454 use_rust = False
1439 elif subrepos:
1455 elif subrepos:
1440 use_rust = False
1456 use_rust = False
1441 elif sparse.enabled:
1457 elif sparse.enabled:
1442 use_rust = False
1458 use_rust = False
1443 elif not isinstance(match, allowed_matchers):
1459 elif not isinstance(match, allowed_matchers):
1444 # Some matchers have yet to be implemented
1460 # Some matchers have yet to be implemented
1445 use_rust = False
1461 use_rust = False
1446
1462
1447 if use_rust:
1463 if use_rust:
1448 try:
1464 try:
1449 return self._rust_status(
1465 return self._rust_status(
1450 match, listclean, listignored, listunknown
1466 match, listclean, listignored, listunknown
1451 )
1467 )
1452 except rustmod.FallbackError:
1468 except rustmod.FallbackError:
1453 pass
1469 pass
1454
1470
1455 def noop(f):
1471 def noop(f):
1456 pass
1472 pass
1457
1473
1458 dcontains = dmap.__contains__
1474 dcontains = dmap.__contains__
1459 dget = dmap.__getitem__
1475 dget = dmap.__getitem__
1460 ladd = lookup.append # aka "unsure"
1476 ladd = lookup.append # aka "unsure"
1461 madd = modified.append
1477 madd = modified.append
1462 aadd = added.append
1478 aadd = added.append
1463 uadd = unknown.append if listunknown else noop
1479 uadd = unknown.append if listunknown else noop
1464 iadd = ignored.append if listignored else noop
1480 iadd = ignored.append if listignored else noop
1465 radd = removed.append
1481 radd = removed.append
1466 dadd = deleted.append
1482 dadd = deleted.append
1467 cadd = clean.append if listclean else noop
1483 cadd = clean.append if listclean else noop
1468 mexact = match.exact
1484 mexact = match.exact
1469 dirignore = self._dirignore
1485 dirignore = self._dirignore
1470 checkexec = self._checkexec
1486 checkexec = self._checkexec
1471 copymap = self._map.copymap
1487 copymap = self._map.copymap
1472 lastnormaltime = self._lastnormaltime
1488 lastnormaltime = self._lastnormaltime
1473
1489
1474 # We need to do full walks when either
1490 # We need to do full walks when either
1475 # - we're listing all clean files, or
1491 # - we're listing all clean files, or
1476 # - match.traversedir does something, because match.traversedir should
1492 # - match.traversedir does something, because match.traversedir should
1477 # be called for every dir in the working dir
1493 # be called for every dir in the working dir
1478 full = listclean or match.traversedir is not None
1494 full = listclean or match.traversedir is not None
1479 for fn, st in pycompat.iteritems(
1495 for fn, st in pycompat.iteritems(
1480 self.walk(match, subrepos, listunknown, listignored, full=full)
1496 self.walk(match, subrepos, listunknown, listignored, full=full)
1481 ):
1497 ):
1482 if not dcontains(fn):
1498 if not dcontains(fn):
1483 if (listignored or mexact(fn)) and dirignore(fn):
1499 if (listignored or mexact(fn)) and dirignore(fn):
1484 if listignored:
1500 if listignored:
1485 iadd(fn)
1501 iadd(fn)
1486 else:
1502 else:
1487 uadd(fn)
1503 uadd(fn)
1488 continue
1504 continue
1489
1505
1490 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1506 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1491 # written like that for performance reasons. dmap[fn] is not a
1507 # written like that for performance reasons. dmap[fn] is not a
1492 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1508 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1493 # opcode has fast paths when the value to be unpacked is a tuple or
1509 # opcode has fast paths when the value to be unpacked is a tuple or
1494 # a list, but falls back to creating a full-fledged iterator in
1510 # a list, but falls back to creating a full-fledged iterator in
1495 # general. That is much slower than simply accessing and storing the
1511 # general. That is much slower than simply accessing and storing the
1496 # tuple members one by one.
1512 # tuple members one by one.
1497 t = dget(fn)
1513 t = dget(fn)
1498 mode = t.mode
1514 mode = t.mode
1499 size = t.size
1515 size = t.size
1500 time = t.mtime
1516 time = t.mtime
1501
1517
1502 if not st and t.tracked:
1518 if not st and t.tracked:
1503 dadd(fn)
1519 dadd(fn)
1504 elif t.merged:
1520 elif t.merged:
1505 madd(fn)
1521 madd(fn)
1506 elif t.added:
1522 elif t.added:
1507 aadd(fn)
1523 aadd(fn)
1508 elif t.removed:
1524 elif t.removed:
1509 radd(fn)
1525 radd(fn)
1510 elif t.tracked:
1526 elif t.tracked:
1511 if (
1527 if (
1512 size >= 0
1528 size >= 0
1513 and (
1529 and (
1514 (size != st.st_size and size != st.st_size & _rangemask)
1530 (size != st.st_size and size != st.st_size & _rangemask)
1515 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1531 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1516 )
1532 )
1517 or t.from_p2
1533 or t.from_p2
1518 or fn in copymap
1534 or fn in copymap
1519 ):
1535 ):
1520 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1536 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1521 # issue6456: Size returned may be longer due to
1537 # issue6456: Size returned may be longer due to
1522 # encryption on EXT-4 fscrypt, undecided.
1538 # encryption on EXT-4 fscrypt, undecided.
1523 ladd(fn)
1539 ladd(fn)
1524 else:
1540 else:
1525 madd(fn)
1541 madd(fn)
1526 elif (
1542 elif (
1527 time != st[stat.ST_MTIME]
1543 time != st[stat.ST_MTIME]
1528 and time != st[stat.ST_MTIME] & _rangemask
1544 and time != st[stat.ST_MTIME] & _rangemask
1529 ):
1545 ):
1530 ladd(fn)
1546 ladd(fn)
1531 elif st[stat.ST_MTIME] == lastnormaltime:
1547 elif st[stat.ST_MTIME] == lastnormaltime:
1532 # fn may have just been marked as normal and it may have
1548 # fn may have just been marked as normal and it may have
1533 # changed in the same second without changing its size.
1549 # changed in the same second without changing its size.
1534 # This can happen if we quickly do multiple commits.
1550 # This can happen if we quickly do multiple commits.
1535 # Force lookup, so we don't miss such a racy file change.
1551 # Force lookup, so we don't miss such a racy file change.
1536 ladd(fn)
1552 ladd(fn)
1537 elif listclean:
1553 elif listclean:
1538 cadd(fn)
1554 cadd(fn)
1539 status = scmutil.status(
1555 status = scmutil.status(
1540 modified, added, removed, deleted, unknown, ignored, clean
1556 modified, added, removed, deleted, unknown, ignored, clean
1541 )
1557 )
1542 return (lookup, status)
1558 return (lookup, status)
1543
1559
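
The core of the pure-Python loop above is the comparison between the cached (mode, size, mtime) and a fresh stat: a size or exec-bit difference is a definite modification, while an mtime-only difference just means the content has to be re-read. A condensed sketch of that decision with the same 31-bit masking; it leaves out the copy/from_p2 and lastnormaltime special cases, and `checkexec` says whether the filesystem records the exec bit:

import os, stat

_RANGEMASK = 0x7FFFFFFF  # the dirstate stores size and mtime truncated to 31 bits

def classify(entry_mode, entry_size, entry_mtime, st, checkexec=True):
    """Return 'modified', 'lookup' (unsure) or 'clean' for a tracked file."""
    size_changed = (entry_size != st.st_size
                    and entry_size != st.st_size & _RANGEMASK)
    exec_changed = checkexec and bool((entry_mode ^ st.st_mode) & 0o100)
    if entry_size >= 0 and (size_changed or exec_changed):
        return 'modified'
    mtime = st[stat.ST_MTIME]
    if entry_mtime != mtime and entry_mtime != mtime & _RANGEMASK:
        return 'lookup'  # same size, different mtime: re-read to be sure
    return 'clean'

# classify(0o100644, 12, 1699999999, os.lstat('some_file')) -> one of the three labels
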
1544 def matches(self, match):
1560 def matches(self, match):
1545 """
1561 """
1546 return files in the dirstate (in whatever state) filtered by match
1562 return files in the dirstate (in whatever state) filtered by match
1547 """
1563 """
1548 dmap = self._map
1564 dmap = self._map
1549 if rustmod is not None:
1565 if rustmod is not None:
1550 dmap = self._map._rustmap
1566 dmap = self._map._rustmap
1551
1567
1552 if match.always():
1568 if match.always():
1553 return dmap.keys()
1569 return dmap.keys()
1554 files = match.files()
1570 files = match.files()
1555 if match.isexact():
1571 if match.isexact():
1556 # fast path -- filter the other way around, since typically files is
1572 # fast path -- filter the other way around, since typically files is
1557 # much smaller than dmap
1573 # much smaller than dmap
1558 return [f for f in files if f in dmap]
1574 return [f for f in files if f in dmap]
1559 if match.prefix() and all(fn in dmap for fn in files):
1575 if match.prefix() and all(fn in dmap for fn in files):
1560 # fast path -- all the values are known to be files, so just return
1576 # fast path -- all the values are known to be files, so just return
1561 # that
1577 # that
1562 return list(files)
1578 return list(files)
1563 return [f for f in dmap if match(f)]
1579 return [f for f in dmap if match(f)]
1564
1580
1565 def _actualfilename(self, tr):
1581 def _actualfilename(self, tr):
1566 if tr:
1582 if tr:
1567 return self._pendingfilename
1583 return self._pendingfilename
1568 else:
1584 else:
1569 return self._filename
1585 return self._filename
1570
1586
1571 def savebackup(self, tr, backupname):
1587 def savebackup(self, tr, backupname):
1572 '''Save current dirstate into backup file'''
1588 '''Save current dirstate into backup file'''
1573 filename = self._actualfilename(tr)
1589 filename = self._actualfilename(tr)
1574 assert backupname != filename
1590 assert backupname != filename
1575
1591
1576 # use '_writedirstate' instead of 'write' to write changes certainly,
1592 # use '_writedirstate' instead of 'write' to write changes certainly,
1577 # because the latter omits writing out if transaction is running.
1593 # because the latter omits writing out if transaction is running.
1578 # output file will be used to create backup of dirstate at this point.
1594 # output file will be used to create backup of dirstate at this point.
1579 if self._dirty or not self._opener.exists(filename):
1595 if self._dirty or not self._opener.exists(filename):
1580 self._writedirstate(
1596 self._writedirstate(
1581 tr,
1597 tr,
1582 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1598 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1583 )
1599 )
1584
1600
1585 if tr:
1601 if tr:
1586 # ensure that subsequent tr.writepending returns True for
1602 # ensure that subsequent tr.writepending returns True for
1587 # changes written out above, even if dirstate is never
1603 # changes written out above, even if dirstate is never
1588 # changed after this
1604 # changed after this
1589 tr.addfilegenerator(
1605 tr.addfilegenerator(
1590 b'dirstate',
1606 b'dirstate',
1591 (self._filename,),
1607 (self._filename,),
1592 lambda f: self._writedirstate(tr, f),
1608 lambda f: self._writedirstate(tr, f),
1593 location=b'plain',
1609 location=b'plain',
1594 )
1610 )
1595
1611
1596 # ensure that pending file written above is unlinked at
1612 # ensure that pending file written above is unlinked at
1597 # failure, even if tr.writepending isn't invoked until the
1613 # failure, even if tr.writepending isn't invoked until the
1598 # end of this transaction
1614 # end of this transaction
1599 tr.registertmp(filename, location=b'plain')
1615 tr.registertmp(filename, location=b'plain')
1600
1616
1601 self._opener.tryunlink(backupname)
1617 self._opener.tryunlink(backupname)
1602 # hardlink backup is okay because _writedirstate is always called
1618 # hardlink backup is okay because _writedirstate is always called
1603 # with an "atomictemp=True" file.
1619 # with an "atomictemp=True" file.
1604 util.copyfile(
1620 util.copyfile(
1605 self._opener.join(filename),
1621 self._opener.join(filename),
1606 self._opener.join(backupname),
1622 self._opener.join(backupname),
1607 hardlink=True,
1623 hardlink=True,
1608 )
1624 )
1609
1625
1610 def restorebackup(self, tr, backupname):
1626 def restorebackup(self, tr, backupname):
1611 '''Restore dirstate by backup file'''
1627 '''Restore dirstate by backup file'''
1612 # this "invalidate()" prevents "wlock.release()" from writing
1628 # this "invalidate()" prevents "wlock.release()" from writing
1613 # changes of dirstate out after restoring from backup file
1629 # changes of dirstate out after restoring from backup file
1614 self.invalidate()
1630 self.invalidate()
1615 filename = self._actualfilename(tr)
1631 filename = self._actualfilename(tr)
1616 o = self._opener
1632 o = self._opener
1617 if util.samefile(o.join(backupname), o.join(filename)):
1633 if util.samefile(o.join(backupname), o.join(filename)):
1618 o.unlink(backupname)
1634 o.unlink(backupname)
1619 else:
1635 else:
1620 o.rename(backupname, filename, checkambig=True)
1636 o.rename(backupname, filename, checkambig=True)
1621
1637
1622 def clearbackup(self, tr, backupname):
1638 def clearbackup(self, tr, backupname):
1623 '''Clear backup file'''
1639 '''Clear backup file'''
1624 self._opener.unlink(backupname)
1640 self._opener.unlink(backupname)
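
`savebackup` above copies the freshly written dirstate with `hardlink=True`, which is safe only because the atomictemp write always produces a new inode rather than editing the file in place. The underlying idea, reduced to the stdlib (an illustrative helper, not Mercurial's `util.copyfile`):

import os
import shutil

def backup_file(src, dst):
    """Create `dst` as a hardlink to `src` when possible, else fall back to a copy."""
    try:
        os.unlink(dst)
    except FileNotFoundError:
        pass
    try:
        # cheap, and safe as long as `src` is only ever replaced atomically
        os.link(src, dst)
    except OSError:
        # cross-device link, or a filesystem without hardlink support
        shutil.copy2(src, dst)
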
@@ -1,750 +1,912
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 from .dirstateutils import (
21 from .dirstateutils import (
22 docket as docketmod,
22 docket as docketmod,
23 )
23 )
24
24
25 parsers = policy.importmod('parsers')
25 parsers = policy.importmod('parsers')
26 rustmod = policy.importrust('dirstate')
26 rustmod = policy.importrust('dirstate')
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29
29
30 DirstateItem = parsers.DirstateItem
30 DirstateItem = parsers.DirstateItem
31
31
32
32
33 # a special value used internally for `size` if the file comes from the other parent
33 # a special value used internally for `size` if the file comes from the other parent
34 FROM_P2 = -2
34 FROM_P2 = -2
35
35
36 # a special value used internally for `size` if the file is modified/merged/added
36 # a special value used internally for `size` if the file is modified/merged/added
37 NONNORMAL = -1
37 NONNORMAL = -1
38
38
39 # a special value used internally for `time` if the time is ambiguous
39 # a special value used internally for `time` if the time is ambiguous
40 AMBIGUOUS_TIME = -1
40 AMBIGUOUS_TIME = -1
41
41
42 rangemask = 0x7FFFFFFF
42 rangemask = 0x7FFFFFFF
43
43
44
44
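
`rangemask` exists because the v1 dirstate stores size and mtime in signed 32-bit fields while reserving negative values for the sentinels above, so real values are truncated to 31 bits both when writing and when comparing. A short standalone illustration of the effect, with a local copy of the constant:

RANGEMASK = 0x7FFFFFFF  # 2**31 - 1

def pack_size_mtime(size, mtime):
    """Truncate size/mtime the way the v1 dirstate does, keeping negative
    values free for the FROM_P2 / NONNORMAL / AMBIGUOUS_TIME sentinels."""
    return size & RANGEMASK, mtime & RANGEMASK

# A file larger than 2 GiB wraps into the 31-bit range, which is why status
# comparisons also accept the masked value (size == st.st_size & RANGEMASK):
print(pack_size_mtime(2**31 + 5, 1699999999))  # -> (5, 1699999999)
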
45 class dirstatemap(object):
45 class dirstatemap(object):
46 """Map encapsulating the dirstate's contents.
46 """Map encapsulating the dirstate's contents.
47
47
48 The dirstate contains the following state:
48 The dirstate contains the following state:
49
49
50 - `identity` is the identity of the dirstate file, which can be used to
50 - `identity` is the identity of the dirstate file, which can be used to
51 detect when changes have occurred to the dirstate file.
51 detect when changes have occurred to the dirstate file.
52
52
53 - `parents` is a pair containing the parents of the working copy. The
53 - `parents` is a pair containing the parents of the working copy. The
54 parents are updated by calling `setparents`.
54 parents are updated by calling `setparents`.
55
55
56 - the state map maps filenames to tuples of (state, mode, size, mtime),
56 - the state map maps filenames to tuples of (state, mode, size, mtime),
57 where state is a single character representing 'normal', 'added',
57 where state is a single character representing 'normal', 'added',
58 'removed', or 'merged'. It is read by treating the dirstate as a
58 'removed', or 'merged'. It is read by treating the dirstate as a
59 dict. File state is updated by calling the `addfile`, `removefile` and
59 dict. File state is updated by calling the `addfile`, `removefile` and
60 `dropfile` methods.
60 `dropfile` methods.
61
61
62 - `copymap` maps destination filenames to their source filename.
62 - `copymap` maps destination filenames to their source filename.
63
63
64 The dirstate also provides the following views onto the state:
64 The dirstate also provides the following views onto the state:
65
65
66 - `nonnormalset` is a set of the filenames that have state other
66 - `nonnormalset` is a set of the filenames that have state other
67 than 'normal', or are normal but have an mtime of -1 ('normallookup').
67 than 'normal', or are normal but have an mtime of -1 ('normallookup').
68
68
69 - `otherparentset` is a set of the filenames that are marked as coming
69 - `otherparentset` is a set of the filenames that are marked as coming
70 from the second parent when the dirstate is currently being merged.
70 from the second parent when the dirstate is currently being merged.
71
71
72 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
72 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
73 form that they appear as in the dirstate.
73 form that they appear as in the dirstate.
74
74
75 - `dirfoldmap` is a dict mapping normalized directory names to the
75 - `dirfoldmap` is a dict mapping normalized directory names to the
76 denormalized form that they appear as in the dirstate.
76 denormalized form that they appear as in the dirstate.
77 """
77 """
78
78
79 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
79 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
80 self._ui = ui
80 self._ui = ui
81 self._opener = opener
81 self._opener = opener
82 self._root = root
82 self._root = root
83 self._filename = b'dirstate'
83 self._filename = b'dirstate'
84 self._nodelen = 20
84 self._nodelen = 20
85 self._nodeconstants = nodeconstants
85 self._nodeconstants = nodeconstants
86 assert (
86 assert (
87 not use_dirstate_v2
87 not use_dirstate_v2
88 ), "should have detected unsupported requirement"
88 ), "should have detected unsupported requirement"
89
89
90 self._parents = None
90 self._parents = None
91 self._dirtyparents = False
91 self._dirtyparents = False
92
92
93 # for consistent view between _pl() and _read() invocations
93 # for consistent view between _pl() and _read() invocations
94 self._pendingmode = None
94 self._pendingmode = None
95
95
96 @propertycache
96 @propertycache
97 def _map(self):
97 def _map(self):
98 self._map = {}
98 self._map = {}
99 self.read()
99 self.read()
100 return self._map
100 return self._map
101
101
102 @propertycache
102 @propertycache
103 def copymap(self):
103 def copymap(self):
104 self.copymap = {}
104 self.copymap = {}
105 self._map
105 self._map
106 return self.copymap
106 return self.copymap
107
107
108 def clear(self):
108 def clear(self):
109 self._map.clear()
109 self._map.clear()
110 self.copymap.clear()
110 self.copymap.clear()
111 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
111 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
112 util.clearcachedproperty(self, b"_dirs")
112 util.clearcachedproperty(self, b"_dirs")
113 util.clearcachedproperty(self, b"_alldirs")
113 util.clearcachedproperty(self, b"_alldirs")
114 util.clearcachedproperty(self, b"filefoldmap")
114 util.clearcachedproperty(self, b"filefoldmap")
115 util.clearcachedproperty(self, b"dirfoldmap")
115 util.clearcachedproperty(self, b"dirfoldmap")
116 util.clearcachedproperty(self, b"nonnormalset")
116 util.clearcachedproperty(self, b"nonnormalset")
117 util.clearcachedproperty(self, b"otherparentset")
117 util.clearcachedproperty(self, b"otherparentset")
118
118
119 def items(self):
119 def items(self):
120 return pycompat.iteritems(self._map)
120 return pycompat.iteritems(self._map)
121
121
122 # forward for python2,3 compat
122 # forward for python2,3 compat
123 iteritems = items
123 iteritems = items
124
124
125 debug_iter = items
125 debug_iter = items
126
126
127 def __len__(self):
127 def __len__(self):
128 return len(self._map)
128 return len(self._map)
129
129
130 def __iter__(self):
130 def __iter__(self):
131 return iter(self._map)
131 return iter(self._map)
132
132
133 def get(self, key, default=None):
133 def get(self, key, default=None):
134 return self._map.get(key, default)
134 return self._map.get(key, default)
135
135
136 def __contains__(self, key):
136 def __contains__(self, key):
137 return key in self._map
137 return key in self._map
138
138
139 def __getitem__(self, key):
139 def __getitem__(self, key):
140 return self._map[key]
140 return self._map[key]
141
141
142 def keys(self):
142 def keys(self):
143 return self._map.keys()
143 return self._map.keys()
144
144
145 def preload(self):
145 def preload(self):
146 """Loads the underlying data, if it's not already loaded"""
146 """Loads the underlying data, if it's not already loaded"""
147 self._map
147 self._map
148
148
149 def _dirs_incr(self, filename, old_entry=None):
149 def _dirs_incr(self, filename, old_entry=None):
150 """incremente the dirstate counter if applicable"""
150 """incremente the dirstate counter if applicable"""
151 if (
151 if (
152 old_entry is None or old_entry.removed
152 old_entry is None or old_entry.removed
153 ) and "_dirs" in self.__dict__:
153 ) and "_dirs" in self.__dict__:
154 self._dirs.addpath(filename)
154 self._dirs.addpath(filename)
155 if old_entry is None and "_alldirs" in self.__dict__:
155 if old_entry is None and "_alldirs" in self.__dict__:
156 self._alldirs.addpath(filename)
156 self._alldirs.addpath(filename)
157
157
158 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
158 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
159 """decremente the dirstate counter if applicable"""
159 """decremente the dirstate counter if applicable"""
160 if old_entry is not None:
160 if old_entry is not None:
161 if "_dirs" in self.__dict__ and not old_entry.removed:
161 if "_dirs" in self.__dict__ and not old_entry.removed:
162 self._dirs.delpath(filename)
162 self._dirs.delpath(filename)
163 if "_alldirs" in self.__dict__ and not remove_variant:
163 if "_alldirs" in self.__dict__ and not remove_variant:
164 self._alldirs.delpath(filename)
164 self._alldirs.delpath(filename)
165 elif remove_variant and "_alldirs" in self.__dict__:
165 elif remove_variant and "_alldirs" in self.__dict__:
166 self._alldirs.addpath(filename)
166 self._alldirs.addpath(filename)
167 if "filefoldmap" in self.__dict__:
167 if "filefoldmap" in self.__dict__:
168 normed = util.normcase(filename)
168 normed = util.normcase(filename)
169 self.filefoldmap.pop(normed, None)
169 self.filefoldmap.pop(normed, None)
170
170
171 def addfile(
171 def addfile(
172 self,
172 self,
173 f,
173 f,
174 mode=0,
174 mode=0,
175 size=None,
175 size=None,
176 mtime=None,
176 mtime=None,
177 added=False,
177 added=False,
178 merged=False,
178 merged=False,
179 from_p2=False,
179 from_p2=False,
180 possibly_dirty=False,
180 possibly_dirty=False,
181 ):
181 ):
182 """Add a tracked file to the dirstate."""
182 """Add a tracked file to the dirstate."""
183 if added:
183 if added:
184 assert not merged
184 assert not merged
185 assert not possibly_dirty
185 assert not possibly_dirty
186 assert not from_p2
186 assert not from_p2
187 state = b'a'
187 state = b'a'
188 size = NONNORMAL
188 size = NONNORMAL
189 mtime = AMBIGUOUS_TIME
189 mtime = AMBIGUOUS_TIME
190 elif merged:
190 elif merged:
191 assert not possibly_dirty
191 assert not possibly_dirty
192 assert not from_p2
192 assert not from_p2
193 state = b'm'
193 state = b'm'
194 size = FROM_P2
194 size = FROM_P2
195 mtime = AMBIGUOUS_TIME
195 mtime = AMBIGUOUS_TIME
196 elif from_p2:
196 elif from_p2:
197 assert not possibly_dirty
197 assert not possibly_dirty
198 state = b'n'
198 state = b'n'
199 size = FROM_P2
199 size = FROM_P2
200 mtime = AMBIGUOUS_TIME
200 mtime = AMBIGUOUS_TIME
201 elif possibly_dirty:
201 elif possibly_dirty:
202 state = b'n'
202 state = b'n'
203 size = NONNORMAL
203 size = NONNORMAL
204 mtime = AMBIGUOUS_TIME
204 mtime = AMBIGUOUS_TIME
205 else:
205 else:
206 assert size != FROM_P2
206 assert size != FROM_P2
207 assert size != NONNORMAL
207 assert size != NONNORMAL
208 state = b'n'
208 state = b'n'
209 size = size & rangemask
209 size = size & rangemask
210 mtime = mtime & rangemask
210 mtime = mtime & rangemask
211 assert state is not None
211 assert state is not None
212 assert size is not None
212 assert size is not None
213 assert mtime is not None
213 assert mtime is not None
214 old_entry = self.get(f)
214 old_entry = self.get(f)
215 self._dirs_incr(f, old_entry)
215 self._dirs_incr(f, old_entry)
216 e = self._map[f] = DirstateItem(state, mode, size, mtime)
216 e = self._map[f] = DirstateItem(state, mode, size, mtime)
217 if e.dm_nonnormal:
217 if e.dm_nonnormal:
218 self.nonnormalset.add(f)
218 self.nonnormalset.add(f)
219 if e.dm_otherparent:
219 if e.dm_otherparent:
220 self.otherparentset.add(f)
220 self.otherparentset.add(f)
221
221
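
`addfile` collapses its keyword flags into the legacy (state, mode, size, mtime) tuple, using the negative sentinels for "unknown" size and mtime. A table-style sketch of that mapping, mirroring the branches above rather than proposing a different API:

FROM_P2, NONNORMAL, AMBIGUOUS_TIME = -2, -1, -1
RANGEMASK = 0x7FFFFFFF

def legacy_tuple(mode, size, mtime, added=False, merged=False,
                 from_p2=False, possibly_dirty=False):
    """Reproduce addfile()'s flag -> (state, mode, size, mtime) mapping."""
    if added:
        return 'a', mode, NONNORMAL, AMBIGUOUS_TIME
    if merged:
        return 'm', mode, FROM_P2, AMBIGUOUS_TIME
    if from_p2:
        return 'n', mode, FROM_P2, AMBIGUOUS_TIME
    if possibly_dirty:
        return 'n', mode, NONNORMAL, AMBIGUOUS_TIME
    # a plain clean file keeps its real size and mtime, truncated to 31 bits
    return 'n', mode, size & RANGEMASK, mtime & RANGEMASK

# legacy_tuple(0o644, 120, 1699999999)          -> ('n', 420, 120, 1699999999)
# legacy_tuple(0o644, None, None, merged=True)  -> ('m', 420, -2, -1)
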
222 def reset_state(
223 self,
224 filename,
225 wc_tracked,
226 p1_tracked,
227 p2_tracked=False,
228 merged=False,
229 clean_p1=False,
230 clean_p2=False,
231 possibly_dirty=False,
232 parentfiledata=None,
233 ):
234 """Set a entry to a given state, diregarding all previous state
235
236 This is to be used by the part of the dirstate API dedicated to
237 adjusting the dirstate after a update/merge.
238
239 note: calling this might result to no entry existing at all if the
240 dirstate map does not see any point at having one for this file
241 anymore.
242 """
243 if merged and (clean_p1 or clean_p2):
244 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
245 raise error.ProgrammingError(msg)
246 # copy information is now outdated
247 # (maybe new information should be passed directly to this function)
248 self.copymap.pop(filename, None)
249
250 if not (p1_tracked or p2_tracked or wc_tracked):
251 self.dropfile(filename)
252 elif merged:
253 # XXX might be merged and removed ?
254 entry = self.get(filename)
255 if entry is not None and entry.tracked:
256 # XXX mostly replicates dirstate.otherparent. We should get
257 # the higher layer to pass us more reliable data where `merged`
258 # actually means merged. Dropping the else clause will show
259 # failures in `test-graft.t`
260 self.addfile(filename, merged=True)
261 else:
262 self.addfile(filename, from_p2=True)
263 elif not (p1_tracked or p2_tracked) and wc_tracked:
264 self.addfile(filename, added=True, possibly_dirty=possibly_dirty)
265 elif (p1_tracked or p2_tracked) and not wc_tracked:
266 # XXX might be merged and removed ?
267 old_entry = self._map.get(filename)
268 self._dirs_decr(filename, old_entry=old_entry, remove_variant=True)
269 self._map[filename] = DirstateItem(b'r', 0, 0, 0)
270 self.nonnormalset.add(filename)
271 elif clean_p2 and wc_tracked:
272 if p1_tracked or self.get(filename) is not None:
273 # XXX the `self.get` call is catching a case in
274 # `test-merge-remove.t` where the file is tracked in p1 but the
275 # p1_tracked argument is False.
276 #
277 # In addition, this seems to be a case where the file is marked
278 # as merged without actually being the result of a merge
279 # action. So things are not ideal here.
280 self.addfile(filename, merged=True)
281 else:
282 self.addfile(filename, from_p2=True)
283 elif not p1_tracked and p2_tracked and wc_tracked:
284 self.addfile(filename, from_p2=True, possibly_dirty=possibly_dirty)
285 elif possibly_dirty:
286 self.addfile(filename, possibly_dirty=possibly_dirty)
287 elif wc_tracked:
288 # this is a "normal" file
289 if parentfiledata is None:
290 msg = b'failed to pass parentfiledata for a normal file: %s'
291 msg %= filename
292 raise error.ProgrammingError(msg)
293 mode, size, mtime = parentfiledata
294 self.addfile(filename, mode=mode, size=size, mtime=mtime)
295 self.nonnormalset.discard(filename)
296 else:
297 assert False, 'unreachable'
298
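
`reset_state` is essentially a dispatch on which of the tracking axes are set. A compact sketch of the order in which its branches are tried, returning a descriptive label instead of mutating the map (the labels are explanatory, not dirstate states):

def reset_state_branch(wc_tracked, p1_tracked, p2_tracked=False, merged=False,
                       clean_p1=False, clean_p2=False, possibly_dirty=False):
    """Mirror the branch order of dirstatemap.reset_state()."""
    if merged and (clean_p1 or clean_p2):
        raise ValueError('`merged` is incompatible with `clean_p1`/`clean_p2`')
    if not (p1_tracked or p2_tracked or wc_tracked):
        return 'drop'                      # nothing references the file anymore
    if merged:
        return 'merged-or-from-p2'         # depends on the existing entry
    if not (p1_tracked or p2_tracked) and wc_tracked:
        return 'added'
    if (p1_tracked or p2_tracked) and not wc_tracked:
        return 'removed'
    if clean_p2 and wc_tracked:
        return 'merged-or-from-p2'
    if not p1_tracked and p2_tracked and wc_tracked:
        return 'from-p2'
    if possibly_dirty:
        return 'possibly-dirty'
    if wc_tracked:
        return 'normal'                    # requires parentfiledata in the real code
    raise AssertionError('unreachable')

# reset_state_branch(wc_tracked=True, p1_tracked=False) -> 'added'
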
222 def removefile(self, f, in_merge=False):
299 def removefile(self, f, in_merge=False):
223 """
300 """
224 Mark a file as removed in the dirstate.
301 Mark a file as removed in the dirstate.
225
302
226 The stored `size` field is used to hold sentinel values that indicate
303 The stored `size` field is used to hold sentinel values that indicate
227 the file's previous state. In the future, we should refactor this
304 the file's previous state. In the future, we should refactor this
228 to be more explicit about what that state is.
305 to be more explicit about what that state is.
229 """
306 """
230 entry = self.get(f)
307 entry = self.get(f)
231 size = 0
308 size = 0
232 if in_merge:
309 if in_merge:
233 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
310 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
234 # during a merge. So I (marmoute) am not sure we need the
311 # during a merge. So I (marmoute) am not sure we need the
235 # conditional at all. Adding an assert to double-check this
312 # conditional at all. Adding an assert to double-check this
236 # would be nice.
313 # would be nice.
237 if entry is not None:
314 if entry is not None:
238 # backup the previous state
315 # backup the previous state
239 if entry.merged: # merge
316 if entry.merged: # merge
240 size = NONNORMAL
317 size = NONNORMAL
241 elif entry.from_p2:
318 elif entry.from_p2:
242 size = FROM_P2
319 size = FROM_P2
243 self.otherparentset.add(f)
320 self.otherparentset.add(f)
244 if entry is not None and not (entry.merged or entry.from_p2):
321 if entry is not None and not (entry.merged or entry.from_p2):
245 self.copymap.pop(f, None)
322 self.copymap.pop(f, None)
246 self._dirs_decr(f, old_entry=entry, remove_variant=True)
323 self._dirs_decr(f, old_entry=entry, remove_variant=True)
247 self._map[f] = DirstateItem(b'r', 0, size, 0)
324 self._map[f] = DirstateItem(b'r', 0, size, 0)
248 self.nonnormalset.add(f)
325 self.nonnormalset.add(f)
249
326
250 def dropfile(self, f):
327 def dropfile(self, f):
251 """
328 """
252 Remove a file from the dirstate. Returns True if the file was
329 Remove a file from the dirstate. Returns True if the file was
253 previously recorded.
330 previously recorded.
254 """
331 """
255 old_entry = self._map.pop(f, None)
332 old_entry = self._map.pop(f, None)
256 self._dirs_decr(f, old_entry=old_entry)
333 self._dirs_decr(f, old_entry=old_entry)
257 self.nonnormalset.discard(f)
334 self.nonnormalset.discard(f)
258 return old_entry is not None
335 return old_entry is not None
259
336
260 def clearambiguoustimes(self, files, now):
337 def clearambiguoustimes(self, files, now):
261 for f in files:
338 for f in files:
262 e = self.get(f)
339 e = self.get(f)
263 if e is not None and e.need_delay(now):
340 if e is not None and e.need_delay(now):
264 e.set_possibly_dirty()
341 e.set_possibly_dirty()
265 self.nonnormalset.add(f)
342 self.nonnormalset.add(f)
266
343
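# Illustrative sketch (not part of the changeset): the idea behind the
# need_delay() check used by clearambiguoustimes() above.  A file recorded
# with mtime == now could still be rewritten within the same second without
# the mtime changing, so such entries are demoted to "possibly dirty".
def ambiguous_files(entries, now):
    """entries: a simplified {filename: mtime} mapping standing in for the map."""
    return {f for f, mtime in entries.items() if mtime == now}


# e.g. ambiguous_files({b'a': 10, b'b': 42}, now=42) == {b'b'}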
267 def nonnormalentries(self):
344 def nonnormalentries(self):
268 '''Compute the nonnormal dirstate entries from the dmap'''
345 '''Compute the nonnormal dirstate entries from the dmap'''
269 try:
346 try:
270 return parsers.nonnormalotherparententries(self._map)
347 return parsers.nonnormalotherparententries(self._map)
271 except AttributeError:
348 except AttributeError:
272 nonnorm = set()
349 nonnorm = set()
273 otherparent = set()
350 otherparent = set()
274 for fname, e in pycompat.iteritems(self._map):
351 for fname, e in pycompat.iteritems(self._map):
275 if e.dm_nonnormal:
352 if e.dm_nonnormal:
276 nonnorm.add(fname)
353 nonnorm.add(fname)
277 if e.from_p2:
354 if e.from_p2:
278 otherparent.add(fname)
355 otherparent.add(fname)
279 return nonnorm, otherparent
356 return nonnorm, otherparent
280
357
281 @propertycache
358 @propertycache
282 def filefoldmap(self):
359 def filefoldmap(self):
283 """Returns a dictionary mapping normalized case paths to their
360 """Returns a dictionary mapping normalized case paths to their
284 non-normalized versions.
361 non-normalized versions.
285 """
362 """
286 try:
363 try:
287 makefilefoldmap = parsers.make_file_foldmap
364 makefilefoldmap = parsers.make_file_foldmap
288 except AttributeError:
365 except AttributeError:
289 pass
366 pass
290 else:
367 else:
291 return makefilefoldmap(
368 return makefilefoldmap(
292 self._map, util.normcasespec, util.normcasefallback
369 self._map, util.normcasespec, util.normcasefallback
293 )
370 )
294
371
295 f = {}
372 f = {}
296 normcase = util.normcase
373 normcase = util.normcase
297 for name, s in pycompat.iteritems(self._map):
374 for name, s in pycompat.iteritems(self._map):
298 if not s.removed:
375 if not s.removed:
299 f[normcase(name)] = name
376 f[normcase(name)] = name
300 f[b'.'] = b'.' # prevents useless util.fspath() invocation
377 f[b'.'] = b'.' # prevents useless util.fspath() invocation
301 return f
378 return f
302
379
303 def hastrackeddir(self, d):
380 def hastrackeddir(self, d):
304 """
381 """
305 Returns True if the dirstate contains a tracked (not removed) file
382 Returns True if the dirstate contains a tracked (not removed) file
306 in this directory.
383 in this directory.
307 """
384 """
308 return d in self._dirs
385 return d in self._dirs
309
386
310 def hasdir(self, d):
387 def hasdir(self, d):
311 """
388 """
312 Returns True if the dirstate contains a file (tracked or removed)
389 Returns True if the dirstate contains a file (tracked or removed)
313 in this directory.
390 in this directory.
314 """
391 """
315 return d in self._alldirs
392 return d in self._alldirs
316
393
317 @propertycache
394 @propertycache
318 def _dirs(self):
395 def _dirs(self):
319 return pathutil.dirs(self._map, b'r')
396 return pathutil.dirs(self._map, b'r')
320
397
321 @propertycache
398 @propertycache
322 def _alldirs(self):
399 def _alldirs(self):
323 return pathutil.dirs(self._map)
400 return pathutil.dirs(self._map)
324
401
325 def _opendirstatefile(self):
402 def _opendirstatefile(self):
326 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
403 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
327 if self._pendingmode is not None and self._pendingmode != mode:
404 if self._pendingmode is not None and self._pendingmode != mode:
328 fp.close()
405 fp.close()
329 raise error.Abort(
406 raise error.Abort(
330 _(b'working directory state may be changed parallelly')
407 _(b'working directory state may be changed parallelly')
331 )
408 )
332 self._pendingmode = mode
409 self._pendingmode = mode
333 return fp
410 return fp
334
411
335 def parents(self):
412 def parents(self):
336 if not self._parents:
413 if not self._parents:
337 try:
414 try:
338 fp = self._opendirstatefile()
415 fp = self._opendirstatefile()
339 st = fp.read(2 * self._nodelen)
416 st = fp.read(2 * self._nodelen)
340 fp.close()
417 fp.close()
341 except IOError as err:
418 except IOError as err:
342 if err.errno != errno.ENOENT:
419 if err.errno != errno.ENOENT:
343 raise
420 raise
344 # File doesn't exist, so the current state is empty
421 # File doesn't exist, so the current state is empty
345 st = b''
422 st = b''
346
423
347 l = len(st)
424 l = len(st)
348 if l == self._nodelen * 2:
425 if l == self._nodelen * 2:
349 self._parents = (
426 self._parents = (
350 st[: self._nodelen],
427 st[: self._nodelen],
351 st[self._nodelen : 2 * self._nodelen],
428 st[self._nodelen : 2 * self._nodelen],
352 )
429 )
353 elif l == 0:
430 elif l == 0:
354 self._parents = (
431 self._parents = (
355 self._nodeconstants.nullid,
432 self._nodeconstants.nullid,
356 self._nodeconstants.nullid,
433 self._nodeconstants.nullid,
357 )
434 )
358 else:
435 else:
359 raise error.Abort(
436 raise error.Abort(
360 _(b'working directory state appears damaged!')
437 _(b'working directory state appears damaged!')
361 )
438 )
362
439
363 return self._parents
440 return self._parents
364
441
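# Illustrative sketch (not part of the changeset): the v1 dirstate file simply
# starts with the two parent nodeids back to back, which is what parents()
# slices out above.  The 20-byte node length and all-zero null id are
# assumptions matching self._nodelen and nullid for sha1 nodes.
NODELEN = 20
NULLID = b'\0' * NODELEN


def read_parents(header_bytes):
    if not header_bytes:
        return NULLID, NULLID  # missing/empty file: both parents are null
    if len(header_bytes) < 2 * NODELEN:
        raise ValueError('working directory state appears damaged')
    return header_bytes[:NODELEN], header_bytes[NODELEN : 2 * NODELEN]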
365 def setparents(self, p1, p2):
442 def setparents(self, p1, p2):
366 self._parents = (p1, p2)
443 self._parents = (p1, p2)
367 self._dirtyparents = True
444 self._dirtyparents = True
368
445
369 def read(self):
446 def read(self):
370 # ignore HG_PENDING because identity is used only for writing
447 # ignore HG_PENDING because identity is used only for writing
371 self.identity = util.filestat.frompath(
448 self.identity = util.filestat.frompath(
372 self._opener.join(self._filename)
449 self._opener.join(self._filename)
373 )
450 )
374
451
375 try:
452 try:
376 fp = self._opendirstatefile()
453 fp = self._opendirstatefile()
377 try:
454 try:
378 st = fp.read()
455 st = fp.read()
379 finally:
456 finally:
380 fp.close()
457 fp.close()
381 except IOError as err:
458 except IOError as err:
382 if err.errno != errno.ENOENT:
459 if err.errno != errno.ENOENT:
383 raise
460 raise
384 return
461 return
385 if not st:
462 if not st:
386 return
463 return
387
464
388 if util.safehasattr(parsers, b'dict_new_presized'):
465 if util.safehasattr(parsers, b'dict_new_presized'):
389 # Make an estimate of the number of files in the dirstate based on
466 # Make an estimate of the number of files in the dirstate based on
390 # its size. This trades wasting some memory for avoiding costly
467 # its size. This trades wasting some memory for avoiding costly
391 # resizes. Each entry has a prefix of 17 bytes followed by one or
468 # resizes. Each entry has a prefix of 17 bytes followed by one or
392 # two path names. Studies on various large-scale real-world repositories
469 # two path names. Studies on various large-scale real-world repositories
393 # found 54 bytes to be a reasonable upper limit for the average path name.
470 # found 54 bytes to be a reasonable upper limit for the average path name.
394 # Copy entries are ignored for the sake of this estimate.
471 # Copy entries are ignored for the sake of this estimate.
395 self._map = parsers.dict_new_presized(len(st) // 71)
472 self._map = parsers.dict_new_presized(len(st) // 71)
396
473
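# Illustrative sketch (not part of the changeset): the arithmetic behind the
# `len(st) // 71` pre-sizing above.
ENTRY_PREFIX_BYTES = 17  # fixed per-entry header in the v1 format
AVG_PATH_BYTES = 54      # upper bound for an average path name, per the comment
BYTES_PER_ENTRY = ENTRY_PREFIX_BYTES + AVG_PATH_BYTES  # == 71


def estimated_file_count(dirstate_size):
    return dirstate_size // BYTES_PER_ENTRY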
397 # Python's garbage collector triggers a GC each time a certain number
474 # Python's garbage collector triggers a GC each time a certain number
398 # of container objects (the number being defined by
475 # of container objects (the number being defined by
399 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
476 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
400 # for each file in the dirstate. The C version then immediately marks
477 # for each file in the dirstate. The C version then immediately marks
401 # them as not to be tracked by the collector. However, this has no
478 # them as not to be tracked by the collector. However, this has no
402 # effect on when GCs are triggered, only on what objects the GC looks
479 # effect on when GCs are triggered, only on what objects the GC looks
403 # into. This means that O(number of files) GCs are unavoidable.
480 # into. This means that O(number of files) GCs are unavoidable.
404 # Depending on when in the process's lifetime the dirstate is parsed,
481 # Depending on when in the process's lifetime the dirstate is parsed,
405 # this can get very expensive. As a workaround, disable GC while
482 # this can get very expensive. As a workaround, disable GC while
406 # parsing the dirstate.
483 # parsing the dirstate.
407 #
484 #
408 # (we cannot decorate the function directly since it is in a C module)
485 # (we cannot decorate the function directly since it is in a C module)
409 parse_dirstate = util.nogc(parsers.parse_dirstate)
486 parse_dirstate = util.nogc(parsers.parse_dirstate)
410 p = parse_dirstate(self._map, self.copymap, st)
487 p = parse_dirstate(self._map, self.copymap, st)
411 if not self._dirtyparents:
488 if not self._dirtyparents:
412 self.setparents(*p)
489 self.setparents(*p)
413
490
414 # Avoid excess attribute lookups by fast pathing certain checks
491 # Avoid excess attribute lookups by fast pathing certain checks
415 self.__contains__ = self._map.__contains__
492 self.__contains__ = self._map.__contains__
416 self.__getitem__ = self._map.__getitem__
493 self.__getitem__ = self._map.__getitem__
417 self.get = self._map.get
494 self.get = self._map.get
418
495
419 def write(self, _tr, st, now):
496 def write(self, _tr, st, now):
420 st.write(
497 st.write(
421 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
498 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
422 )
499 )
423 st.close()
500 st.close()
424 self._dirtyparents = False
501 self._dirtyparents = False
425 self.nonnormalset, self.otherparentset = self.nonnormalentries()
502 self.nonnormalset, self.otherparentset = self.nonnormalentries()
426
503
427 @propertycache
504 @propertycache
428 def nonnormalset(self):
505 def nonnormalset(self):
429 nonnorm, otherparents = self.nonnormalentries()
506 nonnorm, otherparents = self.nonnormalentries()
430 self.otherparentset = otherparents
507 self.otherparentset = otherparents
431 return nonnorm
508 return nonnorm
432
509
433 @propertycache
510 @propertycache
434 def otherparentset(self):
511 def otherparentset(self):
435 nonnorm, otherparents = self.nonnormalentries()
512 nonnorm, otherparents = self.nonnormalentries()
436 self.nonnormalset = nonnorm
513 self.nonnormalset = nonnorm
437 return otherparents
514 return otherparents
438
515
439 def non_normal_or_other_parent_paths(self):
516 def non_normal_or_other_parent_paths(self):
440 return self.nonnormalset.union(self.otherparentset)
517 return self.nonnormalset.union(self.otherparentset)
441
518
442 @propertycache
519 @propertycache
443 def identity(self):
520 def identity(self):
444 self._map
521 self._map
445 return self.identity
522 return self.identity
446
523
447 @propertycache
524 @propertycache
448 def dirfoldmap(self):
525 def dirfoldmap(self):
449 f = {}
526 f = {}
450 normcase = util.normcase
527 normcase = util.normcase
451 for name in self._dirs:
528 for name in self._dirs:
452 f[normcase(name)] = name
529 f[normcase(name)] = name
453 return f
530 return f
454
531
455
532
456 if rustmod is not None:
533 if rustmod is not None:
457
534
458 class dirstatemap(object):
535 class dirstatemap(object):
459 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
536 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
460 self._use_dirstate_v2 = use_dirstate_v2
537 self._use_dirstate_v2 = use_dirstate_v2
461 self._nodeconstants = nodeconstants
538 self._nodeconstants = nodeconstants
462 self._ui = ui
539 self._ui = ui
463 self._opener = opener
540 self._opener = opener
464 self._root = root
541 self._root = root
465 self._filename = b'dirstate'
542 self._filename = b'dirstate'
466 self._nodelen = 20 # Also update Rust code when changing this!
543 self._nodelen = 20 # Also update Rust code when changing this!
467 self._parents = None
544 self._parents = None
468 self._dirtyparents = False
545 self._dirtyparents = False
469 self._docket = None
546 self._docket = None
470
547
471 # for consistent view between _pl() and _read() invocations
548 # for consistent view between _pl() and _read() invocations
472 self._pendingmode = None
549 self._pendingmode = None
473
550
474 self._use_dirstate_tree = self._ui.configbool(
551 self._use_dirstate_tree = self._ui.configbool(
475 b"experimental",
552 b"experimental",
476 b"dirstate-tree.in-memory",
553 b"dirstate-tree.in-memory",
477 False,
554 False,
478 )
555 )
479
556
480 def addfile(
557 def addfile(
481 self,
558 self,
482 f,
559 f,
483 mode=0,
560 mode=0,
484 size=None,
561 size=None,
485 mtime=None,
562 mtime=None,
486 added=False,
563 added=False,
487 merged=False,
564 merged=False,
488 from_p2=False,
565 from_p2=False,
489 possibly_dirty=False,
566 possibly_dirty=False,
490 ):
567 ):
491 return self._rustmap.addfile(
568 return self._rustmap.addfile(
492 f,
569 f,
493 mode,
570 mode,
494 size,
571 size,
495 mtime,
572 mtime,
496 added,
573 added,
497 merged,
574 merged,
498 from_p2,
575 from_p2,
499 possibly_dirty,
576 possibly_dirty,
500 )
577 )
501
578
579 def reset_state(
580 self,
581 filename,
582 wc_tracked,
583 p1_tracked,
584 p2_tracked=False,
585 merged=False,
586 clean_p1=False,
587 clean_p2=False,
588 possibly_dirty=False,
589 parentfiledata=None,
590 ):
591 """Set a entry to a given state, disregarding all previous state
592
593 This is to be used by the part of the dirstate API dedicated to
594 adjusting the dirstate after an update/merge.
595
596 note: calling this might result in no entry existing at all if the
597 dirstate map does not see any point in having one for this file
598 anymore.
599 """
600 if merged and (clean_p1 or clean_p2):
601 msg = (
602 b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
603 )
604 raise error.ProgrammingError(msg)
605 # copy information is now outdated
606 # (maybe new information should be passed directly to this function)
607 self.copymap.pop(filename, None)
608
609 if not (p1_tracked or p2_tracked or wc_tracked):
610 self.dropfile(filename)
611 elif merged:
612 # XXX might be merged and removed ?
613 entry = self.get(filename)
614 if entry is not None and entry.tracked:
615 # XXX this mostly replicates the dirstate "other parent" handling. We
616 # should get the higher layer to pass us more reliable data where
617 # `merged` actually means merged. Dropping the else clause would show
618 # failures in `test-graft.t`
619 self.addfile(filename, merged=True)
620 else:
621 self.addfile(filename, from_p2=True)
622 elif not (p1_tracked or p2_tracked) and wc_tracked:
623 self.addfile(
624 filename, added=True, possibly_dirty=possibly_dirty
625 )
626 elif (p1_tracked or p2_tracked) and not wc_tracked:
627 # XXX might be merged and removed ?
628 self[filename] = DirstateItem(b'r', 0, 0, 0)
629 self.nonnormalset.add(filename)
630 elif clean_p2 and wc_tracked:
631 if p1_tracked or self.get(filename) is not None:
632 # XXX the `self.get` call is catching some case in
633 # `test-merge-remove.t` where the file is tracked in p1 but the
634 # p1_tracked argument is False.
635 #
636 # In addition, this seems to be a case where the file is marked
637 # as merged without actually being the result of a merge
638 # action. So things are not ideal here.
639 self.addfile(filename, merged=True)
640 else:
641 self.addfile(filename, from_p2=True)
642 elif not p1_tracked and p2_tracked and wc_tracked:
643 self.addfile(
644 filename, from_p2=True, possibly_dirty=possibly_dirty
645 )
646 elif possibly_dirty:
647 self.addfile(filename, possibly_dirty=possibly_dirty)
648 elif wc_tracked:
649 # this is a "normal" file
650 if parentfiledata is None:
651 msg = b'failed to pass parentfiledata for a normal file: %s'
652 msg %= filename
653 raise error.ProgrammingError(msg)
654 mode, size, mtime = parentfiledata
655 self.addfile(filename, mode=mode, size=size, mtime=mtime)
656 self.nonnormalset.discard(filename)
657 else:
658 assert False, 'unreachable'
659
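# Illustrative sketch (not part of the changeset): how a higher layer is
# expected to drive reset_state().  `dmap` stands for a dirstatemap instance;
# both call shapes below use argument combinations handled by the method above.
import os


def record_clean_file(dmap, filename, fullpath):
    """File tracked in p1 and the working copy, and known clean."""
    st = os.lstat(fullpath)
    dmap.reset_state(
        filename,
        wc_tracked=True,
        p1_tracked=True,
        parentfiledata=(st.st_mode, st.st_size, int(st.st_mtime)),
    )


def record_dropped_file(dmap, filename):
    """File gone from both parents and the working copy: the entry is dropped."""
    dmap.reset_state(filename, wc_tracked=False, p1_tracked=False)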
502 def removefile(self, *args, **kwargs):
660 def removefile(self, *args, **kwargs):
503 return self._rustmap.removefile(*args, **kwargs)
661 return self._rustmap.removefile(*args, **kwargs)
504
662
505 def dropfile(self, *args, **kwargs):
663 def dropfile(self, *args, **kwargs):
506 return self._rustmap.dropfile(*args, **kwargs)
664 return self._rustmap.dropfile(*args, **kwargs)
507
665
508 def clearambiguoustimes(self, *args, **kwargs):
666 def clearambiguoustimes(self, *args, **kwargs):
509 return self._rustmap.clearambiguoustimes(*args, **kwargs)
667 return self._rustmap.clearambiguoustimes(*args, **kwargs)
510
668
511 def nonnormalentries(self):
669 def nonnormalentries(self):
512 return self._rustmap.nonnormalentries()
670 return self._rustmap.nonnormalentries()
513
671
514 def get(self, *args, **kwargs):
672 def get(self, *args, **kwargs):
515 return self._rustmap.get(*args, **kwargs)
673 return self._rustmap.get(*args, **kwargs)
516
674
517 @property
675 @property
518 def copymap(self):
676 def copymap(self):
519 return self._rustmap.copymap()
677 return self._rustmap.copymap()
520
678
521 def directories(self):
679 def directories(self):
522 return self._rustmap.directories()
680 return self._rustmap.directories()
523
681
524 def debug_iter(self):
682 def debug_iter(self):
525 return self._rustmap.debug_iter()
683 return self._rustmap.debug_iter()
526
684
527 def preload(self):
685 def preload(self):
528 self._rustmap
686 self._rustmap
529
687
530 def clear(self):
688 def clear(self):
531 self._rustmap.clear()
689 self._rustmap.clear()
532 self.setparents(
690 self.setparents(
533 self._nodeconstants.nullid, self._nodeconstants.nullid
691 self._nodeconstants.nullid, self._nodeconstants.nullid
534 )
692 )
535 util.clearcachedproperty(self, b"_dirs")
693 util.clearcachedproperty(self, b"_dirs")
536 util.clearcachedproperty(self, b"_alldirs")
694 util.clearcachedproperty(self, b"_alldirs")
537 util.clearcachedproperty(self, b"dirfoldmap")
695 util.clearcachedproperty(self, b"dirfoldmap")
538
696
539 def items(self):
697 def items(self):
540 return self._rustmap.items()
698 return self._rustmap.items()
541
699
542 def keys(self):
700 def keys(self):
543 return iter(self._rustmap)
701 return iter(self._rustmap)
544
702
545 def __contains__(self, key):
703 def __contains__(self, key):
546 return key in self._rustmap
704 return key in self._rustmap
547
705
548 def __getitem__(self, item):
706 def __getitem__(self, item):
549 return self._rustmap[item]
707 return self._rustmap[item]
550
708
551 def __len__(self):
709 def __len__(self):
552 return len(self._rustmap)
710 return len(self._rustmap)
553
711
554 def __iter__(self):
712 def __iter__(self):
555 return iter(self._rustmap)
713 return iter(self._rustmap)
556
714
557 # forward for python2,3 compat
715 # forward for python2,3 compat
558 iteritems = items
716 iteritems = items
559
717
560 def _opendirstatefile(self):
718 def _opendirstatefile(self):
561 fp, mode = txnutil.trypending(
719 fp, mode = txnutil.trypending(
562 self._root, self._opener, self._filename
720 self._root, self._opener, self._filename
563 )
721 )
564 if self._pendingmode is not None and self._pendingmode != mode:
722 if self._pendingmode is not None and self._pendingmode != mode:
565 fp.close()
723 fp.close()
566 raise error.Abort(
724 raise error.Abort(
567 _(b'working directory state may be changed parallelly')
725 _(b'working directory state may be changed parallelly')
568 )
726 )
569 self._pendingmode = mode
727 self._pendingmode = mode
570 return fp
728 return fp
571
729
572 def _readdirstatefile(self, size=-1):
730 def _readdirstatefile(self, size=-1):
573 try:
731 try:
574 with self._opendirstatefile() as fp:
732 with self._opendirstatefile() as fp:
575 return fp.read(size)
733 return fp.read(size)
576 except IOError as err:
734 except IOError as err:
577 if err.errno != errno.ENOENT:
735 if err.errno != errno.ENOENT:
578 raise
736 raise
579 # File doesn't exist, so the current state is empty
737 # File doesn't exist, so the current state is empty
580 return b''
738 return b''
581
739
582 def setparents(self, p1, p2):
740 def setparents(self, p1, p2):
583 self._parents = (p1, p2)
741 self._parents = (p1, p2)
584 self._dirtyparents = True
742 self._dirtyparents = True
585
743
586 def parents(self):
744 def parents(self):
587 if not self._parents:
745 if not self._parents:
588 if self._use_dirstate_v2:
746 if self._use_dirstate_v2:
589 self._parents = self.docket.parents
747 self._parents = self.docket.parents
590 else:
748 else:
591 read_len = self._nodelen * 2
749 read_len = self._nodelen * 2
592 st = self._readdirstatefile(read_len)
750 st = self._readdirstatefile(read_len)
593 l = len(st)
751 l = len(st)
594 if l == read_len:
752 if l == read_len:
595 self._parents = (
753 self._parents = (
596 st[: self._nodelen],
754 st[: self._nodelen],
597 st[self._nodelen : 2 * self._nodelen],
755 st[self._nodelen : 2 * self._nodelen],
598 )
756 )
599 elif l == 0:
757 elif l == 0:
600 self._parents = (
758 self._parents = (
601 self._nodeconstants.nullid,
759 self._nodeconstants.nullid,
602 self._nodeconstants.nullid,
760 self._nodeconstants.nullid,
603 )
761 )
604 else:
762 else:
605 raise error.Abort(
763 raise error.Abort(
606 _(b'working directory state appears damaged!')
764 _(b'working directory state appears damaged!')
607 )
765 )
608
766
609 return self._parents
767 return self._parents
610
768
611 @property
769 @property
612 def docket(self):
770 def docket(self):
613 if not self._docket:
771 if not self._docket:
614 if not self._use_dirstate_v2:
772 if not self._use_dirstate_v2:
615 raise error.ProgrammingError(
773 raise error.ProgrammingError(
616 b'dirstate only has a docket in v2 format'
774 b'dirstate only has a docket in v2 format'
617 )
775 )
618 self._docket = docketmod.DirstateDocket.parse(
776 self._docket = docketmod.DirstateDocket.parse(
619 self._readdirstatefile(), self._nodeconstants
777 self._readdirstatefile(), self._nodeconstants
620 )
778 )
621 return self._docket
779 return self._docket
622
780
623 @propertycache
781 @propertycache
624 def _rustmap(self):
782 def _rustmap(self):
625 """
783 """
626 Fills the dirstate map when called.
784 Fills the dirstate map when called.
627 """
785 """
628 # ignore HG_PENDING because identity is used only for writing
786 # ignore HG_PENDING because identity is used only for writing
629 self.identity = util.filestat.frompath(
787 self.identity = util.filestat.frompath(
630 self._opener.join(self._filename)
788 self._opener.join(self._filename)
631 )
789 )
632
790
633 if self._use_dirstate_v2:
791 if self._use_dirstate_v2:
634 if self.docket.uuid:
792 if self.docket.uuid:
635 # TODO: use mmap when possible
793 # TODO: use mmap when possible
636 data = self._opener.read(self.docket.data_filename())
794 data = self._opener.read(self.docket.data_filename())
637 else:
795 else:
638 data = b''
796 data = b''
639 self._rustmap = rustmod.DirstateMap.new_v2(
797 self._rustmap = rustmod.DirstateMap.new_v2(
640 data, self.docket.data_size, self.docket.tree_metadata
798 data, self.docket.data_size, self.docket.tree_metadata
641 )
799 )
642 parents = self.docket.parents
800 parents = self.docket.parents
643 else:
801 else:
644 self._rustmap, parents = rustmod.DirstateMap.new_v1(
802 self._rustmap, parents = rustmod.DirstateMap.new_v1(
645 self._use_dirstate_tree, self._readdirstatefile()
803 self._use_dirstate_tree, self._readdirstatefile()
646 )
804 )
647
805
648 if parents and not self._dirtyparents:
806 if parents and not self._dirtyparents:
649 self.setparents(*parents)
807 self.setparents(*parents)
650
808
651 self.__contains__ = self._rustmap.__contains__
809 self.__contains__ = self._rustmap.__contains__
652 self.__getitem__ = self._rustmap.__getitem__
810 self.__getitem__ = self._rustmap.__getitem__
653 self.get = self._rustmap.get
811 self.get = self._rustmap.get
654 return self._rustmap
812 return self._rustmap
655
813
656 def write(self, tr, st, now):
814 def write(self, tr, st, now):
657 if not self._use_dirstate_v2:
815 if not self._use_dirstate_v2:
658 p1, p2 = self.parents()
816 p1, p2 = self.parents()
659 packed = self._rustmap.write_v1(p1, p2, now)
817 packed = self._rustmap.write_v1(p1, p2, now)
660 st.write(packed)
818 st.write(packed)
661 st.close()
819 st.close()
662 self._dirtyparents = False
820 self._dirtyparents = False
663 return
821 return
664
822
665 # We can only append to an existing data file if there is one
823 # We can only append to an existing data file if there is one
666 can_append = self.docket.uuid is not None
824 can_append = self.docket.uuid is not None
667 packed, meta, append = self._rustmap.write_v2(now, can_append)
825 packed, meta, append = self._rustmap.write_v2(now, can_append)
668 if append:
826 if append:
669 docket = self.docket
827 docket = self.docket
670 data_filename = docket.data_filename()
828 data_filename = docket.data_filename()
671 if tr:
829 if tr:
672 tr.add(data_filename, docket.data_size)
830 tr.add(data_filename, docket.data_size)
673 with self._opener(data_filename, b'r+b') as fp:
831 with self._opener(data_filename, b'r+b') as fp:
674 fp.seek(docket.data_size)
832 fp.seek(docket.data_size)
675 assert fp.tell() == docket.data_size
833 assert fp.tell() == docket.data_size
676 written = fp.write(packed)
834 written = fp.write(packed)
677 if written is not None: # py2 may return None
835 if written is not None: # py2 may return None
678 assert written == len(packed), (written, len(packed))
836 assert written == len(packed), (written, len(packed))
679 docket.data_size += len(packed)
837 docket.data_size += len(packed)
680 docket.parents = self.parents()
838 docket.parents = self.parents()
681 docket.tree_metadata = meta
839 docket.tree_metadata = meta
682 st.write(docket.serialize())
840 st.write(docket.serialize())
683 st.close()
841 st.close()
684 else:
842 else:
685 old_docket = self.docket
843 old_docket = self.docket
686 new_docket = docketmod.DirstateDocket.with_new_uuid(
844 new_docket = docketmod.DirstateDocket.with_new_uuid(
687 self.parents(), len(packed), meta
845 self.parents(), len(packed), meta
688 )
846 )
689 data_filename = new_docket.data_filename()
847 data_filename = new_docket.data_filename()
690 if tr:
848 if tr:
691 tr.add(data_filename, 0)
849 tr.add(data_filename, 0)
692 self._opener.write(data_filename, packed)
850 self._opener.write(data_filename, packed)
693 # Write the new docket after the new data file has been
851 # Write the new docket after the new data file has been
694 # written. Because `st` was opened with `atomictemp=True`,
852 # written. Because `st` was opened with `atomictemp=True`,
695 # the actual `.hg/dirstate` file is only affected on close.
853 # the actual `.hg/dirstate` file is only affected on close.
696 st.write(new_docket.serialize())
854 st.write(new_docket.serialize())
697 st.close()
855 st.close()
698 # Remove the old data file after the new docket pointing to
856 # Remove the old data file after the new docket pointing to
699 # the new data file was written.
857 # the new data file was written.
700 if old_docket.uuid:
858 if old_docket.uuid:
701 data_filename = old_docket.data_filename()
859 data_filename = old_docket.data_filename()
702 unlink = lambda _tr=None: self._opener.unlink(data_filename)
860 unlink = lambda _tr=None: self._opener.unlink(data_filename)
703 if tr:
861 if tr:
704 category = b"dirstate-v2-clean-" + old_docket.uuid
862 category = b"dirstate-v2-clean-" + old_docket.uuid
705 tr.addpostclose(category, unlink)
863 tr.addpostclose(category, unlink)
706 else:
864 else:
707 unlink()
865 unlink()
708 self._docket = new_docket
866 self._docket = new_docket
709 # Reload from the newly-written file
867 # Reload from the newly-written file
710 util.clearcachedproperty(self, b"_rustmap")
868 util.clearcachedproperty(self, b"_rustmap")
711 self._dirtyparents = False
869 self._dirtyparents = False
712
870
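# Illustrative sketch (not part of the changeset): the append-vs-rewrite
# decision that the Rust side reports back through write_v2() above.  The 0.5
# threshold mirrors ACCEPTABLE_UNREACHABLE_BYTES_RATIO defined later in this
# changeset; expressing it as a standalone function is a simplification.
def should_append(can_append, unreachable_bytes, total_bytes):
    if not can_append or total_bytes == 0:
        return False
    return unreachable_bytes / total_bytes < 0.5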
713 @propertycache
871 @propertycache
714 def filefoldmap(self):
872 def filefoldmap(self):
715 """Returns a dictionary mapping normalized case paths to their
873 """Returns a dictionary mapping normalized case paths to their
716 non-normalized versions.
874 non-normalized versions.
717 """
875 """
718 return self._rustmap.filefoldmapasdict()
876 return self._rustmap.filefoldmapasdict()
719
877
720 def hastrackeddir(self, d):
878 def hastrackeddir(self, d):
721 return self._rustmap.hastrackeddir(d)
879 return self._rustmap.hastrackeddir(d)
722
880
723 def hasdir(self, d):
881 def hasdir(self, d):
724 return self._rustmap.hasdir(d)
882 return self._rustmap.hasdir(d)
725
883
726 @propertycache
884 @propertycache
727 def identity(self):
885 def identity(self):
728 self._rustmap
886 self._rustmap
729 return self.identity
887 return self.identity
730
888
731 @property
889 @property
732 def nonnormalset(self):
890 def nonnormalset(self):
733 nonnorm = self._rustmap.non_normal_entries()
891 nonnorm = self._rustmap.non_normal_entries()
734 return nonnorm
892 return nonnorm
735
893
736 @propertycache
894 @propertycache
737 def otherparentset(self):
895 def otherparentset(self):
738 otherparents = self._rustmap.other_parent_entries()
896 otherparents = self._rustmap.other_parent_entries()
739 return otherparents
897 return otherparents
740
898
741 def non_normal_or_other_parent_paths(self):
899 def non_normal_or_other_parent_paths(self):
742 return self._rustmap.non_normal_or_other_parent_paths()
900 return self._rustmap.non_normal_or_other_parent_paths()
743
901
744 @propertycache
902 @propertycache
745 def dirfoldmap(self):
903 def dirfoldmap(self):
746 f = {}
904 f = {}
747 normcase = util.normcase
905 normcase = util.normcase
748 for name in self._rustmap.tracked_dirs():
906 for name in self._rustmap.tracked_dirs():
749 f[normcase(name)] = name
907 f[normcase(name)] = name
750 return f
908 return f
909
910 def __setitem__(self, key, value):
911 assert isinstance(value, DirstateItem)
912 self._rustmap.set_v1(key, value)
@@ -1,481 +1,494
1 // dirstate_map.rs
1 // dirstate_map.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::dirstate::parsers::Timestamp;
8 use crate::dirstate::parsers::Timestamp;
9 use crate::{
9 use crate::{
10 dirstate::EntryState,
10 dirstate::EntryState,
11 dirstate::MTIME_UNSET,
11 dirstate::MTIME_UNSET,
12 dirstate::SIZE_FROM_OTHER_PARENT,
12 dirstate::SIZE_FROM_OTHER_PARENT,
13 dirstate::SIZE_NON_NORMAL,
13 dirstate::SIZE_NON_NORMAL,
14 dirstate::V1_RANGEMASK,
14 dirstate::V1_RANGEMASK,
15 pack_dirstate, parse_dirstate,
15 pack_dirstate, parse_dirstate,
16 utils::hg_path::{HgPath, HgPathBuf},
16 utils::hg_path::{HgPath, HgPathBuf},
17 CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
17 CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
18 StateMap,
18 StateMap,
19 };
19 };
20 use micro_timer::timed;
20 use micro_timer::timed;
21 use std::collections::HashSet;
21 use std::collections::HashSet;
22 use std::iter::FromIterator;
22 use std::iter::FromIterator;
23 use std::ops::Deref;
23 use std::ops::Deref;
24
24
25 #[derive(Default)]
25 #[derive(Default)]
26 pub struct DirstateMap {
26 pub struct DirstateMap {
27 state_map: StateMap,
27 state_map: StateMap,
28 pub copy_map: CopyMap,
28 pub copy_map: CopyMap,
29 pub dirs: Option<DirsMultiset>,
29 pub dirs: Option<DirsMultiset>,
30 pub all_dirs: Option<DirsMultiset>,
30 pub all_dirs: Option<DirsMultiset>,
31 non_normal_set: Option<HashSet<HgPathBuf>>,
31 non_normal_set: Option<HashSet<HgPathBuf>>,
32 other_parent_set: Option<HashSet<HgPathBuf>>,
32 other_parent_set: Option<HashSet<HgPathBuf>>,
33 }
33 }
34
34
35 /// Should only really be used in python interface code, for clarity
35 /// Should only really be used in python interface code, for clarity
36 impl Deref for DirstateMap {
36 impl Deref for DirstateMap {
37 type Target = StateMap;
37 type Target = StateMap;
38
38
39 fn deref(&self) -> &Self::Target {
39 fn deref(&self) -> &Self::Target {
40 &self.state_map
40 &self.state_map
41 }
41 }
42 }
42 }
43
43
44 impl FromIterator<(HgPathBuf, DirstateEntry)> for DirstateMap {
44 impl FromIterator<(HgPathBuf, DirstateEntry)> for DirstateMap {
45 fn from_iter<I: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
45 fn from_iter<I: IntoIterator<Item = (HgPathBuf, DirstateEntry)>>(
46 iter: I,
46 iter: I,
47 ) -> Self {
47 ) -> Self {
48 Self {
48 Self {
49 state_map: iter.into_iter().collect(),
49 state_map: iter.into_iter().collect(),
50 ..Self::default()
50 ..Self::default()
51 }
51 }
52 }
52 }
53 }
53 }
54
54
55 impl DirstateMap {
55 impl DirstateMap {
56 pub fn new() -> Self {
56 pub fn new() -> Self {
57 Self::default()
57 Self::default()
58 }
58 }
59
59
60 pub fn clear(&mut self) {
60 pub fn clear(&mut self) {
61 self.state_map = StateMap::default();
61 self.state_map = StateMap::default();
62 self.copy_map.clear();
62 self.copy_map.clear();
63 self.non_normal_set = None;
63 self.non_normal_set = None;
64 self.other_parent_set = None;
64 self.other_parent_set = None;
65 }
65 }
66
66
67 pub fn set_v1_inner(&mut self, filename: &HgPath, entry: DirstateEntry) {
68 self.state_map.insert(filename.to_owned(), entry);
69 }
70
67 /// Add a tracked file to the dirstate
71 /// Add a tracked file to the dirstate
68 pub fn add_file(
72 pub fn add_file(
69 &mut self,
73 &mut self,
70 filename: &HgPath,
74 filename: &HgPath,
71 entry: DirstateEntry,
75 entry: DirstateEntry,
72 // XXX once the dust settles this should probably become an enum
76 // XXX once the dust settles this should probably become an enum
73 added: bool,
77 added: bool,
74 merged: bool,
78 merged: bool,
75 from_p2: bool,
79 from_p2: bool,
76 possibly_dirty: bool,
80 possibly_dirty: bool,
77 ) -> Result<(), DirstateError> {
81 ) -> Result<(), DirstateError> {
78 let mut entry = entry;
82 let mut entry = entry;
79 if added {
83 if added {
80 assert!(!merged);
84 assert!(!merged);
81 assert!(!possibly_dirty);
85 assert!(!possibly_dirty);
82 assert!(!from_p2);
86 assert!(!from_p2);
83 entry.state = EntryState::Added;
87 entry.state = EntryState::Added;
84 entry.size = SIZE_NON_NORMAL;
88 entry.size = SIZE_NON_NORMAL;
85 entry.mtime = MTIME_UNSET;
89 entry.mtime = MTIME_UNSET;
86 } else if merged {
90 } else if merged {
87 assert!(!possibly_dirty);
91 assert!(!possibly_dirty);
88 assert!(!from_p2);
92 assert!(!from_p2);
89 entry.state = EntryState::Merged;
93 entry.state = EntryState::Merged;
90 entry.size = SIZE_FROM_OTHER_PARENT;
94 entry.size = SIZE_FROM_OTHER_PARENT;
91 entry.mtime = MTIME_UNSET;
95 entry.mtime = MTIME_UNSET;
92 } else if from_p2 {
96 } else if from_p2 {
93 assert!(!possibly_dirty);
97 assert!(!possibly_dirty);
94 entry.state = EntryState::Normal;
98 entry.state = EntryState::Normal;
95 entry.size = SIZE_FROM_OTHER_PARENT;
99 entry.size = SIZE_FROM_OTHER_PARENT;
96 entry.mtime = MTIME_UNSET;
100 entry.mtime = MTIME_UNSET;
97 } else if possibly_dirty {
101 } else if possibly_dirty {
98 entry.state = EntryState::Normal;
102 entry.state = EntryState::Normal;
99 entry.size = SIZE_NON_NORMAL;
103 entry.size = SIZE_NON_NORMAL;
100 entry.mtime = MTIME_UNSET;
104 entry.mtime = MTIME_UNSET;
101 } else {
105 } else {
102 entry.state = EntryState::Normal;
106 entry.state = EntryState::Normal;
103 entry.size = entry.size & V1_RANGEMASK;
107 entry.size = entry.size & V1_RANGEMASK;
104 entry.mtime = entry.mtime & V1_RANGEMASK;
108 entry.mtime = entry.mtime & V1_RANGEMASK;
105 }
109 }
106 let old_state = match self.get(filename) {
110 let old_state = match self.get(filename) {
107 Some(e) => e.state,
111 Some(e) => e.state,
108 None => EntryState::Unknown,
112 None => EntryState::Unknown,
109 };
113 };
110 if old_state == EntryState::Unknown || old_state == EntryState::Removed
114 if old_state == EntryState::Unknown || old_state == EntryState::Removed
111 {
115 {
112 if let Some(ref mut dirs) = self.dirs {
116 if let Some(ref mut dirs) = self.dirs {
113 dirs.add_path(filename)?;
117 dirs.add_path(filename)?;
114 }
118 }
115 }
119 }
116 if old_state == EntryState::Unknown {
120 if old_state == EntryState::Unknown {
117 if let Some(ref mut all_dirs) = self.all_dirs {
121 if let Some(ref mut all_dirs) = self.all_dirs {
118 all_dirs.add_path(filename)?;
122 all_dirs.add_path(filename)?;
119 }
123 }
120 }
124 }
121 self.state_map.insert(filename.to_owned(), entry.to_owned());
125 self.state_map.insert(filename.to_owned(), entry.to_owned());
122
126
123 if entry.is_non_normal() {
127 if entry.is_non_normal() {
124 self.get_non_normal_other_parent_entries()
128 self.get_non_normal_other_parent_entries()
125 .0
129 .0
126 .insert(filename.to_owned());
130 .insert(filename.to_owned());
127 }
131 }
128
132
129 if entry.is_from_other_parent() {
133 if entry.is_from_other_parent() {
130 self.get_non_normal_other_parent_entries()
134 self.get_non_normal_other_parent_entries()
131 .1
135 .1
132 .insert(filename.to_owned());
136 .insert(filename.to_owned());
133 }
137 }
134 Ok(())
138 Ok(())
135 }
139 }
136
140
137 /// Mark a file as removed in the dirstate.
141 /// Mark a file as removed in the dirstate.
138 ///
142 ///
139 /// The `size` parameter is used to store sentinel values that indicate
143 /// The `size` parameter is used to store sentinel values that indicate
140 /// the file's previous state. In the future, we should refactor this
144 /// the file's previous state. In the future, we should refactor this
141 /// to be more explicit about what that state is.
145 /// to be more explicit about what that state is.
142 pub fn remove_file(
146 pub fn remove_file(
143 &mut self,
147 &mut self,
144 filename: &HgPath,
148 filename: &HgPath,
145 in_merge: bool,
149 in_merge: bool,
146 ) -> Result<(), DirstateError> {
150 ) -> Result<(), DirstateError> {
147 let old_entry_opt = self.get(filename);
151 let old_entry_opt = self.get(filename);
148 let old_state = match old_entry_opt {
152 let old_state = match old_entry_opt {
149 Some(e) => e.state,
153 Some(e) => e.state,
150 None => EntryState::Unknown,
154 None => EntryState::Unknown,
151 };
155 };
152 let mut size = 0;
156 let mut size = 0;
153 if in_merge {
157 if in_merge {
154 // XXX we should not be able to have the 'm' state and 'FROM_P2' outside
158 // XXX we should not be able to have the 'm' state and 'FROM_P2' outside
155 // of a merge. So I (marmoute) am not sure we need the
159 // of a merge. So I (marmoute) am not sure we need the
156 // conditional at all. Double-checking this with an assert
160 // conditional at all. Double-checking this with an assert
157 // would be nice.
161 // would be nice.
158 if let Some(old_entry) = old_entry_opt {
162 if let Some(old_entry) = old_entry_opt {
159 // backup the previous state
163 // backup the previous state
160 if old_entry.state == EntryState::Merged {
164 if old_entry.state == EntryState::Merged {
161 size = SIZE_NON_NORMAL;
165 size = SIZE_NON_NORMAL;
162 } else if old_entry.state == EntryState::Normal
166 } else if old_entry.state == EntryState::Normal
163 && old_entry.size == SIZE_FROM_OTHER_PARENT
167 && old_entry.size == SIZE_FROM_OTHER_PARENT
164 {
168 {
165 // other parent
169 // other parent
166 size = SIZE_FROM_OTHER_PARENT;
170 size = SIZE_FROM_OTHER_PARENT;
167 self.get_non_normal_other_parent_entries()
171 self.get_non_normal_other_parent_entries()
168 .1
172 .1
169 .insert(filename.to_owned());
173 .insert(filename.to_owned());
170 }
174 }
171 }
175 }
172 }
176 }
173 if old_state != EntryState::Unknown && old_state != EntryState::Removed
177 if old_state != EntryState::Unknown && old_state != EntryState::Removed
174 {
178 {
175 if let Some(ref mut dirs) = self.dirs {
179 if let Some(ref mut dirs) = self.dirs {
176 dirs.delete_path(filename)?;
180 dirs.delete_path(filename)?;
177 }
181 }
178 }
182 }
179 if old_state == EntryState::Unknown {
183 if old_state == EntryState::Unknown {
180 if let Some(ref mut all_dirs) = self.all_dirs {
184 if let Some(ref mut all_dirs) = self.all_dirs {
181 all_dirs.add_path(filename)?;
185 all_dirs.add_path(filename)?;
182 }
186 }
183 }
187 }
184 if size == 0 {
188 if size == 0 {
185 self.copy_map.remove(filename);
189 self.copy_map.remove(filename);
186 }
190 }
187
191
188 self.state_map.insert(
192 self.state_map.insert(
189 filename.to_owned(),
193 filename.to_owned(),
190 DirstateEntry {
194 DirstateEntry {
191 state: EntryState::Removed,
195 state: EntryState::Removed,
192 mode: 0,
196 mode: 0,
193 size,
197 size,
194 mtime: 0,
198 mtime: 0,
195 },
199 },
196 );
200 );
197 self.get_non_normal_other_parent_entries()
201 self.get_non_normal_other_parent_entries()
198 .0
202 .0
199 .insert(filename.to_owned());
203 .insert(filename.to_owned());
200 Ok(())
204 Ok(())
201 }
205 }
202
206
203 /// Remove a file from the dirstate.
207 /// Remove a file from the dirstate.
204 /// Returns `true` if the file was previously recorded.
208 /// Returns `true` if the file was previously recorded.
205 pub fn drop_file(
209 pub fn drop_file(
206 &mut self,
210 &mut self,
207 filename: &HgPath,
211 filename: &HgPath,
208 ) -> Result<bool, DirstateError> {
212 ) -> Result<bool, DirstateError> {
209 let old_state = match self.get(filename) {
213 let old_state = match self.get(filename) {
210 Some(e) => e.state,
214 Some(e) => e.state,
211 None => EntryState::Unknown,
215 None => EntryState::Unknown,
212 };
216 };
213 let exists = self.state_map.remove(filename).is_some();
217 let exists = self.state_map.remove(filename).is_some();
214
218
215 if exists {
219 if exists {
216 if old_state != EntryState::Removed {
220 if old_state != EntryState::Removed {
217 if let Some(ref mut dirs) = self.dirs {
221 if let Some(ref mut dirs) = self.dirs {
218 dirs.delete_path(filename)?;
222 dirs.delete_path(filename)?;
219 }
223 }
220 }
224 }
221 if let Some(ref mut all_dirs) = self.all_dirs {
225 if let Some(ref mut all_dirs) = self.all_dirs {
222 all_dirs.delete_path(filename)?;
226 all_dirs.delete_path(filename)?;
223 }
227 }
224 }
228 }
225 self.get_non_normal_other_parent_entries()
229 self.get_non_normal_other_parent_entries()
226 .0
230 .0
227 .remove(filename);
231 .remove(filename);
228
232
229 Ok(exists)
233 Ok(exists)
230 }
234 }
231
235
232 pub fn clear_ambiguous_times(
236 pub fn clear_ambiguous_times(
233 &mut self,
237 &mut self,
234 filenames: Vec<HgPathBuf>,
238 filenames: Vec<HgPathBuf>,
235 now: i32,
239 now: i32,
236 ) {
240 ) {
237 for filename in filenames {
241 for filename in filenames {
238 if let Some(entry) = self.state_map.get_mut(&filename) {
242 if let Some(entry) = self.state_map.get_mut(&filename) {
239 if entry.clear_ambiguous_mtime(now) {
243 if entry.clear_ambiguous_mtime(now) {
240 self.get_non_normal_other_parent_entries()
244 self.get_non_normal_other_parent_entries()
241 .0
245 .0
242 .insert(filename.to_owned());
246 .insert(filename.to_owned());
243 }
247 }
244 }
248 }
245 }
249 }
246 }
250 }
247
251
248 pub fn non_normal_entries_remove(&mut self, key: impl AsRef<HgPath>) {
252 pub fn non_normal_entries_remove(
253 &mut self,
254 key: impl AsRef<HgPath>,
255 ) -> bool {
249 self.get_non_normal_other_parent_entries()
256 self.get_non_normal_other_parent_entries()
250 .0
257 .0
251 .remove(key.as_ref());
258 .remove(key.as_ref())
259 }
260
261 pub fn non_normal_entries_add(&mut self, key: impl AsRef<HgPath>) {
262 self.get_non_normal_other_parent_entries()
263 .0
264 .insert(key.as_ref().into());
252 }
265 }
253
266
254 pub fn non_normal_entries_union(
267 pub fn non_normal_entries_union(
255 &mut self,
268 &mut self,
256 other: HashSet<HgPathBuf>,
269 other: HashSet<HgPathBuf>,
257 ) -> Vec<HgPathBuf> {
270 ) -> Vec<HgPathBuf> {
258 self.get_non_normal_other_parent_entries()
271 self.get_non_normal_other_parent_entries()
259 .0
272 .0
260 .union(&other)
273 .union(&other)
261 .map(ToOwned::to_owned)
274 .map(ToOwned::to_owned)
262 .collect()
275 .collect()
263 }
276 }
264
277
265 pub fn get_non_normal_other_parent_entries(
278 pub fn get_non_normal_other_parent_entries(
266 &mut self,
279 &mut self,
267 ) -> (&mut HashSet<HgPathBuf>, &mut HashSet<HgPathBuf>) {
280 ) -> (&mut HashSet<HgPathBuf>, &mut HashSet<HgPathBuf>) {
268 self.set_non_normal_other_parent_entries(false);
281 self.set_non_normal_other_parent_entries(false);
269 (
282 (
270 self.non_normal_set.as_mut().unwrap(),
283 self.non_normal_set.as_mut().unwrap(),
271 self.other_parent_set.as_mut().unwrap(),
284 self.other_parent_set.as_mut().unwrap(),
272 )
285 )
273 }
286 }
274
287
275 /// Useful to get immutable references to those sets in contexts where
288 /// Useful to get immutable references to those sets in contexts where
276 /// you only have an immutable reference to the `DirstateMap`, like when
289 /// you only have an immutable reference to the `DirstateMap`, like when
277 /// sharing references with Python.
290 /// sharing references with Python.
278 ///
291 ///
279 /// TODO, get rid of this along with the other "setter/getter" stuff when
292 /// TODO, get rid of this along with the other "setter/getter" stuff when
280 /// a nice typestate plan is defined.
293 /// a nice typestate plan is defined.
281 ///
294 ///
282 /// # Panics
295 /// # Panics
283 ///
296 ///
284 /// Will panic if either set is `None`.
297 /// Will panic if either set is `None`.
285 pub fn get_non_normal_other_parent_entries_panic(
298 pub fn get_non_normal_other_parent_entries_panic(
286 &self,
299 &self,
287 ) -> (&HashSet<HgPathBuf>, &HashSet<HgPathBuf>) {
300 ) -> (&HashSet<HgPathBuf>, &HashSet<HgPathBuf>) {
288 (
301 (
289 self.non_normal_set.as_ref().unwrap(),
302 self.non_normal_set.as_ref().unwrap(),
290 self.other_parent_set.as_ref().unwrap(),
303 self.other_parent_set.as_ref().unwrap(),
291 )
304 )
292 }
305 }
293
306
294 pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
307 pub fn set_non_normal_other_parent_entries(&mut self, force: bool) {
295 if !force
308 if !force
296 && self.non_normal_set.is_some()
309 && self.non_normal_set.is_some()
297 && self.other_parent_set.is_some()
310 && self.other_parent_set.is_some()
298 {
311 {
299 return;
312 return;
300 }
313 }
301 let mut non_normal = HashSet::new();
314 let mut non_normal = HashSet::new();
302 let mut other_parent = HashSet::new();
315 let mut other_parent = HashSet::new();
303
316
304 for (filename, entry) in self.state_map.iter() {
317 for (filename, entry) in self.state_map.iter() {
305 if entry.is_non_normal() {
318 if entry.is_non_normal() {
306 non_normal.insert(filename.to_owned());
319 non_normal.insert(filename.to_owned());
307 }
320 }
308 if entry.is_from_other_parent() {
321 if entry.is_from_other_parent() {
309 other_parent.insert(filename.to_owned());
322 other_parent.insert(filename.to_owned());
310 }
323 }
311 }
324 }
312 self.non_normal_set = Some(non_normal);
325 self.non_normal_set = Some(non_normal);
313 self.other_parent_set = Some(other_parent);
326 self.other_parent_set = Some(other_parent);
314 }
327 }
315
328
316 /// Both of these setters and their uses appear to be the simplest way to
329 /// Both of these setters and their uses appear to be the simplest way to
317 /// emulate a Python lazy property, but it is ugly and unidiomatic.
330 /// emulate a Python lazy property, but it is ugly and unidiomatic.
318 /// TODO One day, rewriting this struct using the typestate might be a
331 /// TODO One day, rewriting this struct using the typestate might be a
319 /// good idea.
332 /// good idea.
320 pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
333 pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
321 if self.all_dirs.is_none() {
334 if self.all_dirs.is_none() {
322 self.all_dirs = Some(DirsMultiset::from_dirstate(
335 self.all_dirs = Some(DirsMultiset::from_dirstate(
323 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
336 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
324 None,
337 None,
325 )?);
338 )?);
326 }
339 }
327 Ok(())
340 Ok(())
328 }
341 }
329
342
330 pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
343 pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
331 if self.dirs.is_none() {
344 if self.dirs.is_none() {
332 self.dirs = Some(DirsMultiset::from_dirstate(
345 self.dirs = Some(DirsMultiset::from_dirstate(
333 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
346 self.state_map.iter().map(|(k, v)| Ok((k, *v))),
334 Some(EntryState::Removed),
347 Some(EntryState::Removed),
335 )?);
348 )?);
336 }
349 }
337 Ok(())
350 Ok(())
338 }
351 }
339
352
340 pub fn has_tracked_dir(
353 pub fn has_tracked_dir(
341 &mut self,
354 &mut self,
342 directory: &HgPath,
355 directory: &HgPath,
343 ) -> Result<bool, DirstateError> {
356 ) -> Result<bool, DirstateError> {
344 self.set_dirs()?;
357 self.set_dirs()?;
345 Ok(self.dirs.as_ref().unwrap().contains(directory))
358 Ok(self.dirs.as_ref().unwrap().contains(directory))
346 }
359 }
347
360
348 pub fn has_dir(
361 pub fn has_dir(
349 &mut self,
362 &mut self,
350 directory: &HgPath,
363 directory: &HgPath,
351 ) -> Result<bool, DirstateError> {
364 ) -> Result<bool, DirstateError> {
352 self.set_all_dirs()?;
365 self.set_all_dirs()?;
353 Ok(self.all_dirs.as_ref().unwrap().contains(directory))
366 Ok(self.all_dirs.as_ref().unwrap().contains(directory))
354 }
367 }
355
368
356 #[timed]
369 #[timed]
357 pub fn read(
370 pub fn read(
358 &mut self,
371 &mut self,
359 file_contents: &[u8],
372 file_contents: &[u8],
360 ) -> Result<Option<DirstateParents>, DirstateError> {
373 ) -> Result<Option<DirstateParents>, DirstateError> {
361 if file_contents.is_empty() {
374 if file_contents.is_empty() {
362 return Ok(None);
375 return Ok(None);
363 }
376 }
364
377
365 let (parents, entries, copies) = parse_dirstate(file_contents)?;
378 let (parents, entries, copies) = parse_dirstate(file_contents)?;
366 self.state_map.extend(
379 self.state_map.extend(
367 entries
380 entries
368 .into_iter()
381 .into_iter()
369 .map(|(path, entry)| (path.to_owned(), entry)),
382 .map(|(path, entry)| (path.to_owned(), entry)),
370 );
383 );
371 self.copy_map.extend(
384 self.copy_map.extend(
372 copies
385 copies
373 .into_iter()
386 .into_iter()
374 .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
387 .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
375 );
388 );
376 Ok(Some(parents.clone()))
389 Ok(Some(parents.clone()))
377 }
390 }
378
391
379 pub fn pack(
392 pub fn pack(
380 &mut self,
393 &mut self,
381 parents: DirstateParents,
394 parents: DirstateParents,
382 now: Timestamp,
395 now: Timestamp,
383 ) -> Result<Vec<u8>, DirstateError> {
396 ) -> Result<Vec<u8>, DirstateError> {
384 let packed =
397 let packed =
385 pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
398 pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
386
399
387 self.set_non_normal_other_parent_entries(true);
400 self.set_non_normal_other_parent_entries(true);
388 Ok(packed)
401 Ok(packed)
389 }
402 }
390 }
403 }
391
404
392 #[cfg(test)]
405 #[cfg(test)]
393 mod tests {
406 mod tests {
394 use super::*;
407 use super::*;
395
408
396 #[test]
409 #[test]
397 fn test_dirs_multiset() {
410 fn test_dirs_multiset() {
398 let mut map = DirstateMap::new();
411 let mut map = DirstateMap::new();
399 assert!(map.dirs.is_none());
412 assert!(map.dirs.is_none());
400 assert!(map.all_dirs.is_none());
413 assert!(map.all_dirs.is_none());
401
414
402 assert_eq!(map.has_dir(HgPath::new(b"nope")).unwrap(), false);
415 assert_eq!(map.has_dir(HgPath::new(b"nope")).unwrap(), false);
403 assert!(map.all_dirs.is_some());
416 assert!(map.all_dirs.is_some());
404 assert!(map.dirs.is_none());
417 assert!(map.dirs.is_none());
405
418
406 assert_eq!(map.has_tracked_dir(HgPath::new(b"nope")).unwrap(), false);
419 assert_eq!(map.has_tracked_dir(HgPath::new(b"nope")).unwrap(), false);
407 assert!(map.dirs.is_some());
420 assert!(map.dirs.is_some());
408 }
421 }
409
422
410 #[test]
423 #[test]
411 fn test_add_file() {
424 fn test_add_file() {
412 let mut map = DirstateMap::new();
425 let mut map = DirstateMap::new();
413
426
414 assert_eq!(0, map.len());
427 assert_eq!(0, map.len());
415
428
416 map.add_file(
429 map.add_file(
417 HgPath::new(b"meh"),
430 HgPath::new(b"meh"),
418 DirstateEntry {
431 DirstateEntry {
419 state: EntryState::Normal,
432 state: EntryState::Normal,
420 mode: 1337,
433 mode: 1337,
421 mtime: 1337,
434 mtime: 1337,
422 size: 1337,
435 size: 1337,
423 },
436 },
424 false,
437 false,
425 false,
438 false,
426 false,
439 false,
427 false,
440 false,
428 )
441 )
429 .unwrap();
442 .unwrap();
430
443
431 assert_eq!(1, map.len());
444 assert_eq!(1, map.len());
432 assert_eq!(0, map.get_non_normal_other_parent_entries().0.len());
445 assert_eq!(0, map.get_non_normal_other_parent_entries().0.len());
433 assert_eq!(0, map.get_non_normal_other_parent_entries().1.len());
446 assert_eq!(0, map.get_non_normal_other_parent_entries().1.len());
434 }
447 }
435
448
436 #[test]
449 #[test]
437 fn test_non_normal_other_parent_entries() {
450 fn test_non_normal_other_parent_entries() {
438 let mut map: DirstateMap = [
451 let mut map: DirstateMap = [
439 (b"f1", (EntryState::Removed, 1337, 1337, 1337)),
452 (b"f1", (EntryState::Removed, 1337, 1337, 1337)),
440 (b"f2", (EntryState::Normal, 1337, 1337, -1)),
453 (b"f2", (EntryState::Normal, 1337, 1337, -1)),
441 (b"f3", (EntryState::Normal, 1337, 1337, 1337)),
454 (b"f3", (EntryState::Normal, 1337, 1337, 1337)),
442 (b"f4", (EntryState::Normal, 1337, -2, 1337)),
455 (b"f4", (EntryState::Normal, 1337, -2, 1337)),
443 (b"f5", (EntryState::Added, 1337, 1337, 1337)),
456 (b"f5", (EntryState::Added, 1337, 1337, 1337)),
444 (b"f6", (EntryState::Added, 1337, 1337, -1)),
457 (b"f6", (EntryState::Added, 1337, 1337, -1)),
445 (b"f7", (EntryState::Merged, 1337, 1337, -1)),
458 (b"f7", (EntryState::Merged, 1337, 1337, -1)),
446 (b"f8", (EntryState::Merged, 1337, 1337, 1337)),
459 (b"f8", (EntryState::Merged, 1337, 1337, 1337)),
447 (b"f9", (EntryState::Merged, 1337, -2, 1337)),
460 (b"f9", (EntryState::Merged, 1337, -2, 1337)),
448 (b"fa", (EntryState::Added, 1337, -2, 1337)),
461 (b"fa", (EntryState::Added, 1337, -2, 1337)),
449 (b"fb", (EntryState::Removed, 1337, -2, 1337)),
462 (b"fb", (EntryState::Removed, 1337, -2, 1337)),
450 ]
463 ]
451 .iter()
464 .iter()
452 .map(|(fname, (state, mode, size, mtime))| {
465 .map(|(fname, (state, mode, size, mtime))| {
453 (
466 (
454 HgPathBuf::from_bytes(fname.as_ref()),
467 HgPathBuf::from_bytes(fname.as_ref()),
455 DirstateEntry {
468 DirstateEntry {
456 state: *state,
469 state: *state,
457 mode: *mode,
470 mode: *mode,
458 size: *size,
471 size: *size,
459 mtime: *mtime,
472 mtime: *mtime,
460 },
473 },
461 )
474 )
462 })
475 })
463 .collect();
476 .collect();
464
477
465 let mut non_normal = [
478 let mut non_normal = [
466 b"f1", b"f2", b"f5", b"f6", b"f7", b"f8", b"f9", b"fa", b"fb",
479 b"f1", b"f2", b"f5", b"f6", b"f7", b"f8", b"f9", b"fa", b"fb",
467 ]
480 ]
468 .iter()
481 .iter()
469 .map(|x| HgPathBuf::from_bytes(x.as_ref()))
482 .map(|x| HgPathBuf::from_bytes(x.as_ref()))
470 .collect();
483 .collect();
471
484
472 let mut other_parent = HashSet::new();
485 let mut other_parent = HashSet::new();
473 other_parent.insert(HgPathBuf::from_bytes(b"f4"));
486 other_parent.insert(HgPathBuf::from_bytes(b"f4"));
474 let entries = map.get_non_normal_other_parent_entries();
487 let entries = map.get_non_normal_other_parent_entries();
475
488
476 assert_eq!(
489 assert_eq!(
477 (&mut non_normal, &mut other_parent),
490 (&mut non_normal, &mut other_parent),
478 (entries.0, entries.1)
491 (entries.0, entries.1)
479 );
492 );
480 }
493 }
481 }
494 }
@@ -1,1295 +1,1314
1 use bytes_cast::BytesCast;
1 use bytes_cast::BytesCast;
2 use micro_timer::timed;
2 use micro_timer::timed;
3 use std::borrow::Cow;
3 use std::borrow::Cow;
4 use std::convert::TryInto;
4 use std::convert::TryInto;
5 use std::path::PathBuf;
5 use std::path::PathBuf;
6
6
7 use super::on_disk;
7 use super::on_disk;
8 use super::on_disk::DirstateV2ParseError;
8 use super::on_disk::DirstateV2ParseError;
9 use super::path_with_basename::WithBasename;
9 use super::path_with_basename::WithBasename;
10 use crate::dirstate::parsers::pack_entry;
10 use crate::dirstate::parsers::pack_entry;
11 use crate::dirstate::parsers::packed_entry_size;
11 use crate::dirstate::parsers::packed_entry_size;
12 use crate::dirstate::parsers::parse_dirstate_entries;
12 use crate::dirstate::parsers::parse_dirstate_entries;
13 use crate::dirstate::parsers::Timestamp;
13 use crate::dirstate::parsers::Timestamp;
14 use crate::dirstate::MTIME_UNSET;
14 use crate::dirstate::MTIME_UNSET;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
16 use crate::dirstate::SIZE_NON_NORMAL;
16 use crate::dirstate::SIZE_NON_NORMAL;
17 use crate::dirstate::V1_RANGEMASK;
17 use crate::dirstate::V1_RANGEMASK;
18 use crate::matchers::Matcher;
18 use crate::matchers::Matcher;
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
20 use crate::CopyMapIter;
20 use crate::CopyMapIter;
21 use crate::DirstateEntry;
21 use crate::DirstateEntry;
22 use crate::DirstateError;
22 use crate::DirstateError;
23 use crate::DirstateParents;
23 use crate::DirstateParents;
24 use crate::DirstateStatus;
24 use crate::DirstateStatus;
25 use crate::EntryState;
25 use crate::EntryState;
26 use crate::FastHashMap;
26 use crate::FastHashMap;
27 use crate::PatternFileWarning;
27 use crate::PatternFileWarning;
28 use crate::StateMapIter;
28 use crate::StateMapIter;
29 use crate::StatusError;
29 use crate::StatusError;
30 use crate::StatusOptions;
30 use crate::StatusOptions;
31
31
32 /// Append to an existing data file if the amount of unreachable data (not used
32 /// Append to an existing data file if the amount of unreachable data (not used
33 /// anymore) is less than this fraction of the total amount of existing data.
33 /// anymore) is less than this fraction of the total amount of existing data.
34 const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;
34 const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;
35
35
36 pub struct DirstateMap<'on_disk> {
36 pub struct DirstateMap<'on_disk> {
37 /// Contents of the `.hg/dirstate` file
37 /// Contents of the `.hg/dirstate` file
38 pub(super) on_disk: &'on_disk [u8],
38 pub(super) on_disk: &'on_disk [u8],
39
39
40 pub(super) root: ChildNodes<'on_disk>,
40 pub(super) root: ChildNodes<'on_disk>,
41
41
42 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
42 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
43 pub(super) nodes_with_entry_count: u32,
43 pub(super) nodes_with_entry_count: u32,
44
44
45 /// Number of nodes anywhere in the tree that have
45 /// Number of nodes anywhere in the tree that have
46 /// `.copy_source.is_some()`.
46 /// `.copy_source.is_some()`.
47 pub(super) nodes_with_copy_source_count: u32,
47 pub(super) nodes_with_copy_source_count: u32,
48
48
49 /// See on_disk::Header
49 /// See on_disk::Header
50 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
50 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
51
51
52 /// How many bytes of `on_disk` are not used anymore
52 /// How many bytes of `on_disk` are not used anymore
53 pub(super) unreachable_bytes: u32,
53 pub(super) unreachable_bytes: u32,
54 }
54 }
55
55
56 /// Using a plain `HgPathBuf` of the full path from the repository root as a
56 /// Using a plain `HgPathBuf` of the full path from the repository root as a
57 /// map key would also work: all paths in a given map have the same parent
57 /// map key would also work: all paths in a given map have the same parent
58 /// path, so comparing full paths gives the same result as comparing base
58 /// path, so comparing full paths gives the same result as comparing base
59 /// names. However `HashMap` would waste time always re-hashing the same
59 /// names. However `HashMap` would waste time always re-hashing the same
60 /// string prefix.
60 /// string prefix.
61 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
61 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
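A rough standalone illustration of the re-hashing point above (a toy nested map keyed by base names only; it does not use the real `WithBasename` type): each lookup level hashes a short component such as "c" instead of the ever-growing full path "a/b/c".

    use std::collections::HashMap;

    #[derive(Default)]
    struct ToyNode {
        children: HashMap<String, ToyNode>, // keyed by base name only
    }

    fn insert(root: &mut ToyNode, path: &str) {
        let mut node = root;
        for component in path.split('/') {
            // Only `component` is hashed here, never the full prefix.
            node = node.children.entry(component.to_string()).or_default();
        }
    }

    fn main() {
        let mut root = ToyNode::default();
        insert(&mut root, "a/b/c");
        insert(&mut root, "a/b/d");
        assert_eq!(root.children["a"].children["b"].children.len(), 2);
    }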
62
62
63 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
63 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
64 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
64 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
65 pub(super) enum BorrowedPath<'tree, 'on_disk> {
65 pub(super) enum BorrowedPath<'tree, 'on_disk> {
66 InMemory(&'tree HgPathBuf),
66 InMemory(&'tree HgPathBuf),
67 OnDisk(&'on_disk HgPath),
67 OnDisk(&'on_disk HgPath),
68 }
68 }
69
69
70 pub(super) enum ChildNodes<'on_disk> {
70 pub(super) enum ChildNodes<'on_disk> {
71 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
71 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
72 OnDisk(&'on_disk [on_disk::Node]),
72 OnDisk(&'on_disk [on_disk::Node]),
73 }
73 }
74
74
75 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
75 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
76 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
76 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
77 OnDisk(&'on_disk [on_disk::Node]),
77 OnDisk(&'on_disk [on_disk::Node]),
78 }
78 }
79
79
80 pub(super) enum NodeRef<'tree, 'on_disk> {
80 pub(super) enum NodeRef<'tree, 'on_disk> {
81 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
81 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
82 OnDisk(&'on_disk on_disk::Node),
82 OnDisk(&'on_disk on_disk::Node),
83 }
83 }
84
84
85 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
85 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
86 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
86 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
87 match *self {
87 match *self {
88 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
88 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
89 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
89 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
90 }
90 }
91 }
91 }
92 }
92 }
93
93
94 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
94 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
95 type Target = HgPath;
95 type Target = HgPath;
96
96
97 fn deref(&self) -> &HgPath {
97 fn deref(&self) -> &HgPath {
98 match *self {
98 match *self {
99 BorrowedPath::InMemory(in_memory) => in_memory,
99 BorrowedPath::InMemory(in_memory) => in_memory,
100 BorrowedPath::OnDisk(on_disk) => on_disk,
100 BorrowedPath::OnDisk(on_disk) => on_disk,
101 }
101 }
102 }
102 }
103 }
103 }
104
104
105 impl Default for ChildNodes<'_> {
105 impl Default for ChildNodes<'_> {
106 fn default() -> Self {
106 fn default() -> Self {
107 ChildNodes::InMemory(Default::default())
107 ChildNodes::InMemory(Default::default())
108 }
108 }
109 }
109 }
110
110
111 impl<'on_disk> ChildNodes<'on_disk> {
111 impl<'on_disk> ChildNodes<'on_disk> {
112 pub(super) fn as_ref<'tree>(
112 pub(super) fn as_ref<'tree>(
113 &'tree self,
113 &'tree self,
114 ) -> ChildNodesRef<'tree, 'on_disk> {
114 ) -> ChildNodesRef<'tree, 'on_disk> {
115 match self {
115 match self {
116 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
116 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
117 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
117 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
118 }
118 }
119 }
119 }
120
120
121 pub(super) fn is_empty(&self) -> bool {
121 pub(super) fn is_empty(&self) -> bool {
122 match self {
122 match self {
123 ChildNodes::InMemory(nodes) => nodes.is_empty(),
123 ChildNodes::InMemory(nodes) => nodes.is_empty(),
124 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
124 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
125 }
125 }
126 }
126 }
127
127
128 fn make_mut(
128 fn make_mut(
129 &mut self,
129 &mut self,
130 on_disk: &'on_disk [u8],
130 on_disk: &'on_disk [u8],
131 unreachable_bytes: &mut u32,
131 unreachable_bytes: &mut u32,
132 ) -> Result<
132 ) -> Result<
133 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
133 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
134 DirstateV2ParseError,
134 DirstateV2ParseError,
135 > {
135 > {
136 match self {
136 match self {
137 ChildNodes::InMemory(nodes) => Ok(nodes),
137 ChildNodes::InMemory(nodes) => Ok(nodes),
138 ChildNodes::OnDisk(nodes) => {
138 ChildNodes::OnDisk(nodes) => {
139 *unreachable_bytes +=
139 *unreachable_bytes +=
140 std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32;
140 std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32;
141 let nodes = nodes
141 let nodes = nodes
142 .iter()
142 .iter()
143 .map(|node| {
143 .map(|node| {
144 Ok((
144 Ok((
145 node.path(on_disk)?,
145 node.path(on_disk)?,
146 node.to_in_memory_node(on_disk)?,
146 node.to_in_memory_node(on_disk)?,
147 ))
147 ))
148 })
148 })
149 .collect::<Result<_, _>>()?;
149 .collect::<Result<_, _>>()?;
150 *self = ChildNodes::InMemory(nodes);
150 *self = ChildNodes::InMemory(nodes);
151 match self {
151 match self {
152 ChildNodes::InMemory(nodes) => Ok(nodes),
152 ChildNodes::InMemory(nodes) => Ok(nodes),
153 ChildNodes::OnDisk(_) => unreachable!(),
153 ChildNodes::OnDisk(_) => unreachable!(),
154 }
154 }
155 }
155 }
156 }
156 }
157 }
157 }
158 }
158 }
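When `make_mut` above converts an on-disk slice of nodes to the in-memory form, the bytes that slice occupied become unreachable, and `std::mem::size_of_val` on the slice is what measures them. A small standalone check of that behaviour, using a made-up fixed-size record rather than the real on-disk node layout:

    // A made-up 24-byte record standing in for an on-disk node.
    #[repr(C)]
    struct Record {
        a: u64,
        b: u64,
        c: u64,
    }

    fn main() {
        let records = [Record { a: 0, b: 0, c: 0 }, Record { a: 1, b: 2, c: 3 }];
        let slice: &[Record] = &records;
        // For a slice, size_of_val reports len * size_of::<Record>() bytes:
        // exactly the span of file data that just became unreachable.
        assert_eq!(
            std::mem::size_of_val(slice),
            slice.len() * std::mem::size_of::<Record>()
        );
    }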
159
159
160 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
160 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
161 pub(super) fn get(
161 pub(super) fn get(
162 &self,
162 &self,
163 base_name: &HgPath,
163 base_name: &HgPath,
164 on_disk: &'on_disk [u8],
164 on_disk: &'on_disk [u8],
165 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
165 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
166 match self {
166 match self {
167 ChildNodesRef::InMemory(nodes) => Ok(nodes
167 ChildNodesRef::InMemory(nodes) => Ok(nodes
168 .get_key_value(base_name)
168 .get_key_value(base_name)
169 .map(|(k, v)| NodeRef::InMemory(k, v))),
169 .map(|(k, v)| NodeRef::InMemory(k, v))),
170 ChildNodesRef::OnDisk(nodes) => {
170 ChildNodesRef::OnDisk(nodes) => {
171 let mut parse_result = Ok(());
171 let mut parse_result = Ok(());
172 let search_result = nodes.binary_search_by(|node| {
172 let search_result = nodes.binary_search_by(|node| {
173 match node.base_name(on_disk) {
173 match node.base_name(on_disk) {
174 Ok(node_base_name) => node_base_name.cmp(base_name),
174 Ok(node_base_name) => node_base_name.cmp(base_name),
175 Err(e) => {
175 Err(e) => {
176 parse_result = Err(e);
176 parse_result = Err(e);
177 // Dummy comparison result, `search_result` won’t
177 // Dummy comparison result, `search_result` won’t
178 // be used since `parse_result` is an error
178 // be used since `parse_result` is an error
179 std::cmp::Ordering::Equal
179 std::cmp::Ordering::Equal
180 }
180 }
181 }
181 }
182 });
182 });
183 parse_result.map(|()| {
183 parse_result.map(|()| {
184 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
184 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
185 })
185 })
186 }
186 }
187 }
187 }
188 }
188 }
189
189
190 /// Iterate in undefined order
190 /// Iterate in undefined order
191 pub(super) fn iter(
191 pub(super) fn iter(
192 &self,
192 &self,
193 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
193 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
194 match self {
194 match self {
195 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
195 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
196 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
196 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
197 ),
197 ),
198 ChildNodesRef::OnDisk(nodes) => {
198 ChildNodesRef::OnDisk(nodes) => {
199 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
199 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
200 }
200 }
201 }
201 }
202 }
202 }
203
203
204 /// Iterate in parallel in undefined order
204 /// Iterate in parallel in undefined order
205 pub(super) fn par_iter(
205 pub(super) fn par_iter(
206 &self,
206 &self,
207 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
207 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
208 {
208 {
209 use rayon::prelude::*;
209 use rayon::prelude::*;
210 match self {
210 match self {
211 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
211 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
212 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
212 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
213 ),
213 ),
214 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
214 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
215 nodes.par_iter().map(NodeRef::OnDisk),
215 nodes.par_iter().map(NodeRef::OnDisk),
216 ),
216 ),
217 }
217 }
218 }
218 }
219
219
220 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
220 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
221 match self {
221 match self {
222 ChildNodesRef::InMemory(nodes) => {
222 ChildNodesRef::InMemory(nodes) => {
223 let mut vec: Vec<_> = nodes
223 let mut vec: Vec<_> = nodes
224 .iter()
224 .iter()
225 .map(|(k, v)| NodeRef::InMemory(k, v))
225 .map(|(k, v)| NodeRef::InMemory(k, v))
226 .collect();
226 .collect();
227 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
227 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
228 match node {
228 match node {
229 NodeRef::InMemory(path, _node) => path.base_name(),
229 NodeRef::InMemory(path, _node) => path.base_name(),
230 NodeRef::OnDisk(_) => unreachable!(),
230 NodeRef::OnDisk(_) => unreachable!(),
231 }
231 }
232 }
232 }
233 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
233 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
234 // value: https://github.com/rust-lang/rust/issues/34162
234 // value: https://github.com/rust-lang/rust/issues/34162
235 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
235 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
236 vec
236 vec
237 }
237 }
238 ChildNodesRef::OnDisk(nodes) => {
238 ChildNodesRef::OnDisk(nodes) => {
239 // Nodes on disk are already sorted
239 // Nodes on disk are already sorted
240 nodes.iter().map(NodeRef::OnDisk).collect()
240 nodes.iter().map(NodeRef::OnDisk).collect()
241 }
241 }
242 }
242 }
243 }
243 }
244 }
244 }
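The comparison written out in `sorted` above is the usual workaround for the `sort_unstable_by_key` limitation it cites: the key closure cannot return a reference borrowing from its argument. A standalone illustration of the same workaround:

    fn main() {
        let mut items = vec![
            ("b".to_string(), 2u32),
            ("a".to_string(), 1),
            ("c".to_string(), 3),
        ];

        // `items.sort_unstable_by_key(|(name, _)| name.as_str())` does not
        // compile: the key type would have to borrow from the element
        // (rust-lang/rust#34162). Comparing explicitly sidesteps that.
        items.sort_unstable_by(|a, b| a.0.cmp(&b.0));

        let names: Vec<&str> = items.iter().map(|(n, _)| n.as_str()).collect();
        assert_eq!(names, ["a", "b", "c"]);
    }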
245
245
246 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
246 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
247 pub(super) fn full_path(
247 pub(super) fn full_path(
248 &self,
248 &self,
249 on_disk: &'on_disk [u8],
249 on_disk: &'on_disk [u8],
250 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
250 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
251 match self {
251 match self {
252 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
252 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
253 NodeRef::OnDisk(node) => node.full_path(on_disk),
253 NodeRef::OnDisk(node) => node.full_path(on_disk),
254 }
254 }
255 }
255 }
256
256
257 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
257 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
258 /// HgPath>` detached from `'tree`
258 /// HgPath>` detached from `'tree`
259 pub(super) fn full_path_borrowed(
259 pub(super) fn full_path_borrowed(
260 &self,
260 &self,
261 on_disk: &'on_disk [u8],
261 on_disk: &'on_disk [u8],
262 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
262 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
263 match self {
263 match self {
264 NodeRef::InMemory(path, _node) => match path.full_path() {
264 NodeRef::InMemory(path, _node) => match path.full_path() {
265 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
265 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
266 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
266 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
267 },
267 },
268 NodeRef::OnDisk(node) => {
268 NodeRef::OnDisk(node) => {
269 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
269 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
270 }
270 }
271 }
271 }
272 }
272 }
273
273
274 pub(super) fn base_name(
274 pub(super) fn base_name(
275 &self,
275 &self,
276 on_disk: &'on_disk [u8],
276 on_disk: &'on_disk [u8],
277 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
277 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
278 match self {
278 match self {
279 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
279 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
280 NodeRef::OnDisk(node) => node.base_name(on_disk),
280 NodeRef::OnDisk(node) => node.base_name(on_disk),
281 }
281 }
282 }
282 }
283
283
284 pub(super) fn children(
284 pub(super) fn children(
285 &self,
285 &self,
286 on_disk: &'on_disk [u8],
286 on_disk: &'on_disk [u8],
287 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
287 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
288 match self {
288 match self {
289 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
289 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
290 NodeRef::OnDisk(node) => {
290 NodeRef::OnDisk(node) => {
291 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
291 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
292 }
292 }
293 }
293 }
294 }
294 }
295
295
296 pub(super) fn has_copy_source(&self) -> bool {
296 pub(super) fn has_copy_source(&self) -> bool {
297 match self {
297 match self {
298 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
298 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
299 NodeRef::OnDisk(node) => node.has_copy_source(),
299 NodeRef::OnDisk(node) => node.has_copy_source(),
300 }
300 }
301 }
301 }
302
302
303 pub(super) fn copy_source(
303 pub(super) fn copy_source(
304 &self,
304 &self,
305 on_disk: &'on_disk [u8],
305 on_disk: &'on_disk [u8],
306 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
306 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
307 match self {
307 match self {
308 NodeRef::InMemory(_path, node) => {
308 NodeRef::InMemory(_path, node) => {
309 Ok(node.copy_source.as_ref().map(|s| &**s))
309 Ok(node.copy_source.as_ref().map(|s| &**s))
310 }
310 }
311 NodeRef::OnDisk(node) => node.copy_source(on_disk),
311 NodeRef::OnDisk(node) => node.copy_source(on_disk),
312 }
312 }
313 }
313 }
314
314
315 pub(super) fn entry(
315 pub(super) fn entry(
316 &self,
316 &self,
317 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
317 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
318 match self {
318 match self {
319 NodeRef::InMemory(_path, node) => {
319 NodeRef::InMemory(_path, node) => {
320 Ok(node.data.as_entry().copied())
320 Ok(node.data.as_entry().copied())
321 }
321 }
322 NodeRef::OnDisk(node) => node.entry(),
322 NodeRef::OnDisk(node) => node.entry(),
323 }
323 }
324 }
324 }
325
325
326 pub(super) fn state(
326 pub(super) fn state(
327 &self,
327 &self,
328 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
328 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
329 match self {
329 match self {
330 NodeRef::InMemory(_path, node) => {
330 NodeRef::InMemory(_path, node) => {
331 Ok(node.data.as_entry().map(|entry| entry.state))
331 Ok(node.data.as_entry().map(|entry| entry.state))
332 }
332 }
333 NodeRef::OnDisk(node) => node.state(),
333 NodeRef::OnDisk(node) => node.state(),
334 }
334 }
335 }
335 }
336
336
337 pub(super) fn cached_directory_mtime(
337 pub(super) fn cached_directory_mtime(
338 &self,
338 &self,
339 ) -> Option<&'tree on_disk::Timestamp> {
339 ) -> Option<&'tree on_disk::Timestamp> {
340 match self {
340 match self {
341 NodeRef::InMemory(_path, node) => match &node.data {
341 NodeRef::InMemory(_path, node) => match &node.data {
342 NodeData::CachedDirectory { mtime } => Some(mtime),
342 NodeData::CachedDirectory { mtime } => Some(mtime),
343 _ => None,
343 _ => None,
344 },
344 },
345 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
345 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
346 }
346 }
347 }
347 }
348
348
349 pub(super) fn descendants_with_entry_count(&self) -> u32 {
349 pub(super) fn descendants_with_entry_count(&self) -> u32 {
350 match self {
350 match self {
351 NodeRef::InMemory(_path, node) => {
351 NodeRef::InMemory(_path, node) => {
352 node.descendants_with_entry_count
352 node.descendants_with_entry_count
353 }
353 }
354 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
354 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
355 }
355 }
356 }
356 }
357
357
358 pub(super) fn tracked_descendants_count(&self) -> u32 {
358 pub(super) fn tracked_descendants_count(&self) -> u32 {
359 match self {
359 match self {
360 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
360 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
361 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
361 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
362 }
362 }
363 }
363 }
364 }
364 }
365
365
366 /// Represents a file or a directory
366 /// Represents a file or a directory
367 #[derive(Default)]
367 #[derive(Default)]
368 pub(super) struct Node<'on_disk> {
368 pub(super) struct Node<'on_disk> {
369 pub(super) data: NodeData,
369 pub(super) data: NodeData,
370
370
371 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
371 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
372
372
373 pub(super) children: ChildNodes<'on_disk>,
373 pub(super) children: ChildNodes<'on_disk>,
374
374
375 /// How many (non-inclusive) descendants of this node have an entry.
375 /// How many (non-inclusive) descendants of this node have an entry.
376 pub(super) descendants_with_entry_count: u32,
376 pub(super) descendants_with_entry_count: u32,
377
377
378 /// How many (non-inclusive) descendants of this node have an entry whose
378 /// How many (non-inclusive) descendants of this node have an entry whose
379 /// state is "tracked".
379 /// state is "tracked".
380 pub(super) tracked_descendants_count: u32,
380 pub(super) tracked_descendants_count: u32,
381 }
381 }
382
382
383 pub(super) enum NodeData {
383 pub(super) enum NodeData {
384 Entry(DirstateEntry),
384 Entry(DirstateEntry),
385 CachedDirectory { mtime: on_disk::Timestamp },
385 CachedDirectory { mtime: on_disk::Timestamp },
386 None,
386 None,
387 }
387 }
388
388
389 impl Default for NodeData {
389 impl Default for NodeData {
390 fn default() -> Self {
390 fn default() -> Self {
391 NodeData::None
391 NodeData::None
392 }
392 }
393 }
393 }
394
394
395 impl NodeData {
395 impl NodeData {
396 fn has_entry(&self) -> bool {
396 fn has_entry(&self) -> bool {
397 match self {
397 match self {
398 NodeData::Entry(_) => true,
398 NodeData::Entry(_) => true,
399 _ => false,
399 _ => false,
400 }
400 }
401 }
401 }
402
402
403 fn as_entry(&self) -> Option<&DirstateEntry> {
403 fn as_entry(&self) -> Option<&DirstateEntry> {
404 match self {
404 match self {
405 NodeData::Entry(entry) => Some(entry),
405 NodeData::Entry(entry) => Some(entry),
406 _ => None,
406 _ => None,
407 }
407 }
408 }
408 }
409 }
409 }
410
410
411 impl<'on_disk> DirstateMap<'on_disk> {
411 impl<'on_disk> DirstateMap<'on_disk> {
412 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
412 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
413 Self {
413 Self {
414 on_disk,
414 on_disk,
415 root: ChildNodes::default(),
415 root: ChildNodes::default(),
416 nodes_with_entry_count: 0,
416 nodes_with_entry_count: 0,
417 nodes_with_copy_source_count: 0,
417 nodes_with_copy_source_count: 0,
418 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
418 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
419 unreachable_bytes: 0,
419 unreachable_bytes: 0,
420 }
420 }
421 }
421 }
422
422
423 #[timed]
423 #[timed]
424 pub fn new_v2(
424 pub fn new_v2(
425 on_disk: &'on_disk [u8],
425 on_disk: &'on_disk [u8],
426 data_size: usize,
426 data_size: usize,
427 metadata: &[u8],
427 metadata: &[u8],
428 ) -> Result<Self, DirstateError> {
428 ) -> Result<Self, DirstateError> {
429 if let Some(data) = on_disk.get(..data_size) {
429 if let Some(data) = on_disk.get(..data_size) {
430 Ok(on_disk::read(data, metadata)?)
430 Ok(on_disk::read(data, metadata)?)
431 } else {
431 } else {
432 Err(DirstateV2ParseError.into())
432 Err(DirstateV2ParseError.into())
433 }
433 }
434 }
434 }
435
435
436 #[timed]
436 #[timed]
437 pub fn new_v1(
437 pub fn new_v1(
438 on_disk: &'on_disk [u8],
438 on_disk: &'on_disk [u8],
439 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
439 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
440 let mut map = Self::empty(on_disk);
440 let mut map = Self::empty(on_disk);
441 if map.on_disk.is_empty() {
441 if map.on_disk.is_empty() {
442 return Ok((map, None));
442 return Ok((map, None));
443 }
443 }
444
444
445 let parents = parse_dirstate_entries(
445 let parents = parse_dirstate_entries(
446 map.on_disk,
446 map.on_disk,
447 |path, entry, copy_source| {
447 |path, entry, copy_source| {
448 let tracked = entry.state.is_tracked();
448 let tracked = entry.state.is_tracked();
449 let node = Self::get_or_insert_node(
449 let node = Self::get_or_insert_node(
450 map.on_disk,
450 map.on_disk,
451 &mut map.unreachable_bytes,
451 &mut map.unreachable_bytes,
452 &mut map.root,
452 &mut map.root,
453 path,
453 path,
454 WithBasename::to_cow_borrowed,
454 WithBasename::to_cow_borrowed,
455 |ancestor| {
455 |ancestor| {
456 if tracked {
456 if tracked {
457 ancestor.tracked_descendants_count += 1
457 ancestor.tracked_descendants_count += 1
458 }
458 }
459 ancestor.descendants_with_entry_count += 1
459 ancestor.descendants_with_entry_count += 1
460 },
460 },
461 )?;
461 )?;
462 assert!(
462 assert!(
463 !node.data.has_entry(),
463 !node.data.has_entry(),
464 "duplicate dirstate entry in read"
464 "duplicate dirstate entry in read"
465 );
465 );
466 assert!(
466 assert!(
467 node.copy_source.is_none(),
467 node.copy_source.is_none(),
468 "duplicate dirstate entry in read"
468 "duplicate dirstate entry in read"
469 );
469 );
470 node.data = NodeData::Entry(*entry);
470 node.data = NodeData::Entry(*entry);
471 node.copy_source = copy_source.map(Cow::Borrowed);
471 node.copy_source = copy_source.map(Cow::Borrowed);
472 map.nodes_with_entry_count += 1;
472 map.nodes_with_entry_count += 1;
473 if copy_source.is_some() {
473 if copy_source.is_some() {
474 map.nodes_with_copy_source_count += 1
474 map.nodes_with_copy_source_count += 1
475 }
475 }
476 Ok(())
476 Ok(())
477 },
477 },
478 )?;
478 )?;
479 let parents = Some(parents.clone());
479 let parents = Some(parents.clone());
480
480
481 Ok((map, parents))
481 Ok((map, parents))
482 }
482 }
483
483
484 /// Assuming dirstate-v2 format, returns whether the next write should
484 /// Assuming dirstate-v2 format, returns whether the next write should
485 /// append to the existing data file that contains `self.on_disk` (true),
485 /// append to the existing data file that contains `self.on_disk` (true),
486 /// or create a new data file from scratch (false).
486 /// or create a new data file from scratch (false).
487 pub(super) fn write_should_append(&self) -> bool {
487 pub(super) fn write_should_append(&self) -> bool {
488 let ratio = self.unreachable_bytes as f32 / self.on_disk.len() as f32;
488 let ratio = self.unreachable_bytes as f32 / self.on_disk.len() as f32;
489 ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
489 ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
490 }
490 }
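A worked example of the threshold above, with made-up sizes: for a 1000-byte data file with 300 unreachable bytes the ratio is 0.3, which is below 0.5, so the next write appends; at 600 unreachable bytes (ratio 0.6) the file is rewritten from scratch.

    const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;

    fn write_should_append(unreachable_bytes: u32, on_disk_len: usize) -> bool {
        let ratio = unreachable_bytes as f32 / on_disk_len as f32;
        ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
    }

    fn main() {
        assert!(write_should_append(300, 1000)); // 0.3 < 0.5: append
        assert!(!write_should_append(600, 1000)); // 0.6 >= 0.5: rewrite
    }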
491
491
492 fn get_node<'tree>(
492 fn get_node<'tree>(
493 &'tree self,
493 &'tree self,
494 path: &HgPath,
494 path: &HgPath,
495 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
495 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
496 let mut children = self.root.as_ref();
496 let mut children = self.root.as_ref();
497 let mut components = path.components();
497 let mut components = path.components();
498 let mut component =
498 let mut component =
499 components.next().expect("expected at least one component");
499 components.next().expect("expected at least one component");
500 loop {
500 loop {
501 if let Some(child) = children.get(component, self.on_disk)? {
501 if let Some(child) = children.get(component, self.on_disk)? {
502 if let Some(next_component) = components.next() {
502 if let Some(next_component) = components.next() {
503 component = next_component;
503 component = next_component;
504 children = child.children(self.on_disk)?;
504 children = child.children(self.on_disk)?;
505 } else {
505 } else {
506 return Ok(Some(child));
506 return Ok(Some(child));
507 }
507 }
508 } else {
508 } else {
509 return Ok(None);
509 return Ok(None);
510 }
510 }
511 }
511 }
512 }
512 }
513
513
514 /// Returns a mutable reference to the node at `path` if it exists
514 /// Returns a mutable reference to the node at `path` if it exists
515 ///
515 ///
516 /// This takes `root` instead of `&mut self` so that callers can mutate
516 /// This takes `root` instead of `&mut self` so that callers can mutate
517 /// other fields while the returned borrow is still valid
517 /// other fields while the returned borrow is still valid
518 fn get_node_mut<'tree>(
518 fn get_node_mut<'tree>(
519 on_disk: &'on_disk [u8],
519 on_disk: &'on_disk [u8],
520 unreachable_bytes: &mut u32,
520 unreachable_bytes: &mut u32,
521 root: &'tree mut ChildNodes<'on_disk>,
521 root: &'tree mut ChildNodes<'on_disk>,
522 path: &HgPath,
522 path: &HgPath,
523 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
523 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
524 let mut children = root;
524 let mut children = root;
525 let mut components = path.components();
525 let mut components = path.components();
526 let mut component =
526 let mut component =
527 components.next().expect("expected at least one component");
527 components.next().expect("expected at least one component");
528 loop {
528 loop {
529 if let Some(child) = children
529 if let Some(child) = children
530 .make_mut(on_disk, unreachable_bytes)?
530 .make_mut(on_disk, unreachable_bytes)?
531 .get_mut(component)
531 .get_mut(component)
532 {
532 {
533 if let Some(next_component) = components.next() {
533 if let Some(next_component) = components.next() {
534 component = next_component;
534 component = next_component;
535 children = &mut child.children;
535 children = &mut child.children;
536 } else {
536 } else {
537 return Ok(Some(child));
537 return Ok(Some(child));
538 }
538 }
539 } else {
539 } else {
540 return Ok(None);
540 return Ok(None);
541 }
541 }
542 }
542 }
543 }
543 }
544
544
545 pub(super) fn get_or_insert<'tree, 'path>(
545 pub(super) fn get_or_insert<'tree, 'path>(
546 &'tree mut self,
546 &'tree mut self,
547 path: &HgPath,
547 path: &HgPath,
548 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
548 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
549 Self::get_or_insert_node(
549 Self::get_or_insert_node(
550 self.on_disk,
550 self.on_disk,
551 &mut self.unreachable_bytes,
551 &mut self.unreachable_bytes,
552 &mut self.root,
552 &mut self.root,
553 path,
553 path,
554 WithBasename::to_cow_owned,
554 WithBasename::to_cow_owned,
555 |_| {},
555 |_| {},
556 )
556 )
557 }
557 }
558
558
559 fn get_or_insert_node<'tree, 'path>(
559 fn get_or_insert_node<'tree, 'path>(
560 on_disk: &'on_disk [u8],
560 on_disk: &'on_disk [u8],
561 unreachable_bytes: &mut u32,
561 unreachable_bytes: &mut u32,
562 root: &'tree mut ChildNodes<'on_disk>,
562 root: &'tree mut ChildNodes<'on_disk>,
563 path: &'path HgPath,
563 path: &'path HgPath,
564 to_cow: impl Fn(
564 to_cow: impl Fn(
565 WithBasename<&'path HgPath>,
565 WithBasename<&'path HgPath>,
566 ) -> WithBasename<Cow<'on_disk, HgPath>>,
566 ) -> WithBasename<Cow<'on_disk, HgPath>>,
567 mut each_ancestor: impl FnMut(&mut Node),
567 mut each_ancestor: impl FnMut(&mut Node),
568 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
568 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
569 let mut child_nodes = root;
569 let mut child_nodes = root;
570 let mut inclusive_ancestor_paths =
570 let mut inclusive_ancestor_paths =
571 WithBasename::inclusive_ancestors_of(path);
571 WithBasename::inclusive_ancestors_of(path);
572 let mut ancestor_path = inclusive_ancestor_paths
572 let mut ancestor_path = inclusive_ancestor_paths
573 .next()
573 .next()
574 .expect("expected at least one inclusive ancestor");
574 .expect("expected at least one inclusive ancestor");
575 loop {
575 loop {
576 // TODO: can we avoid allocating an owned key in cases where the
576 // TODO: can we avoid allocating an owned key in cases where the
577 // map already contains that key, without introducing double
577 // map already contains that key, without introducing double
578 // lookup?
578 // lookup?
579 let child_node = child_nodes
579 let child_node = child_nodes
580 .make_mut(on_disk, unreachable_bytes)?
580 .make_mut(on_disk, unreachable_bytes)?
581 .entry(to_cow(ancestor_path))
581 .entry(to_cow(ancestor_path))
582 .or_default();
582 .or_default();
583 if let Some(next) = inclusive_ancestor_paths.next() {
583 if let Some(next) = inclusive_ancestor_paths.next() {
584 each_ancestor(child_node);
584 each_ancestor(child_node);
585 ancestor_path = next;
585 ancestor_path = next;
586 child_nodes = &mut child_node.children;
586 child_nodes = &mut child_node.children;
587 } else {
587 } else {
588 return Ok(child_node);
588 return Ok(child_node);
589 }
589 }
590 }
590 }
591 }
591 }
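`get_or_insert_node` above visits one node per inclusive ancestor of the path ("a", then "a/b", then "a/b/c"). A toy sketch of that enumeration using plain string splitting; `inclusive_ancestors` here is a hypothetical stand-in for the real `WithBasename::inclusive_ancestors_of` helper:

    fn inclusive_ancestors<'a>(path: &'a str) -> impl Iterator<Item = &'a str> + 'a {
        path.char_indices()
            .filter(|&(_, c)| c == '/')
            .map(move |(i, _)| &path[..i])
            .chain(std::iter::once(path))
    }

    fn main() {
        let ancestors: Vec<&str> = inclusive_ancestors("a/b/c").collect();
        assert_eq!(ancestors, ["a", "a/b", "a/b/c"]);
    }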
592
592
593 fn add_or_remove_file(
593 fn add_or_remove_file(
594 &mut self,
594 &mut self,
595 path: &HgPath,
595 path: &HgPath,
596 old_state: EntryState,
596 old_state: EntryState,
597 new_entry: DirstateEntry,
597 new_entry: DirstateEntry,
598 ) -> Result<(), DirstateV2ParseError> {
598 ) -> Result<(), DirstateV2ParseError> {
599 let had_entry = old_state != EntryState::Unknown;
599 let had_entry = old_state != EntryState::Unknown;
600 let tracked_count_increment =
600 let tracked_count_increment =
601 match (old_state.is_tracked(), new_entry.state.is_tracked()) {
601 match (old_state.is_tracked(), new_entry.state.is_tracked()) {
602 (false, true) => 1,
602 (false, true) => 1,
603 (true, false) => -1,
603 (true, false) => -1,
604 _ => 0,
604 _ => 0,
605 };
605 };
606
606
607 let node = Self::get_or_insert_node(
607 let node = Self::get_or_insert_node(
608 self.on_disk,
608 self.on_disk,
609 &mut self.unreachable_bytes,
609 &mut self.unreachable_bytes,
610 &mut self.root,
610 &mut self.root,
611 path,
611 path,
612 WithBasename::to_cow_owned,
612 WithBasename::to_cow_owned,
613 |ancestor| {
613 |ancestor| {
614 if !had_entry {
614 if !had_entry {
615 ancestor.descendants_with_entry_count += 1;
615 ancestor.descendants_with_entry_count += 1;
616 }
616 }
617
617
618 // We can’t use `+= increment` because the counter is unsigned,
618 // We can’t use `+= increment` because the counter is unsigned,
619 // and we want debug builds to detect accidental underflow
619 // and we want debug builds to detect accidental underflow
620 // through zero
620 // through zero
621 match tracked_count_increment {
621 match tracked_count_increment {
622 1 => ancestor.tracked_descendants_count += 1,
622 1 => ancestor.tracked_descendants_count += 1,
623 -1 => ancestor.tracked_descendants_count -= 1,
623 -1 => ancestor.tracked_descendants_count -= 1,
624 _ => {}
624 _ => {}
625 }
625 }
626 },
626 },
627 )?;
627 )?;
628 if !had_entry {
628 if !had_entry {
629 self.nodes_with_entry_count += 1
629 self.nodes_with_entry_count += 1
630 }
630 }
631 node.data = NodeData::Entry(new_entry);
631 node.data = NodeData::Entry(new_entry);
632 Ok(())
632 Ok(())
633 }
633 }
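The comment above about not writing `+= increment` comes down to unsigned arithmetic: keeping the counter a `u32` means a buggy decrement below zero panics in a debug build (the accidental-underflow detection the code wants), so the signed increment is matched on rather than added. A minimal standalone version of the same pattern:

    fn apply(counter: &mut u32, tracked_count_increment: i32) {
        // Matching on the sign keeps the counter unsigned, so an erroneous
        // decrement of an already-zero counter still trips the debug-build
        // overflow check instead of silently wrapping.
        match tracked_count_increment {
            1 => *counter += 1,
            -1 => *counter -= 1,
            _ => {}
        }
    }

    fn main() {
        let mut tracked = 0u32;
        apply(&mut tracked, 1);
        apply(&mut tracked, -1);
        assert_eq!(tracked, 0);
        // apply(&mut tracked, -1); // would panic in a debug build: underflow
    }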
634
634
635 fn iter_nodes<'tree>(
635 fn iter_nodes<'tree>(
636 &'tree self,
636 &'tree self,
637 ) -> impl Iterator<
637 ) -> impl Iterator<
638 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
638 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
639 > + 'tree {
639 > + 'tree {
640 // Depth first tree traversal.
640 // Depth first tree traversal.
641 //
641 //
642 // If we could afford internal iteration and recursion,
642 // If we could afford internal iteration and recursion,
643 // this would look like:
643 // this would look like:
644 //
644 //
645 // ```
645 // ```
646 // fn traverse_children(
646 // fn traverse_children(
647 // children: &ChildNodes,
647 // children: &ChildNodes,
648 // each: &mut impl FnMut(&Node),
648 // each: &mut impl FnMut(&Node),
649 // ) {
649 // ) {
650 // for child in children.values() {
650 // for child in children.values() {
651 // traverse_children(&child.children, each);
651 // traverse_children(&child.children, each);
652 // each(child);
652 // each(child);
653 // }
653 // }
654 // }
654 // }
655 // ```
655 // ```
656 //
656 //
657 // However we want an external iterator and therefore can’t use the
657 // However we want an external iterator and therefore can’t use the
658 // call stack. Use an explicit stack instead:
658 // call stack. Use an explicit stack instead:
659 let mut stack = Vec::new();
659 let mut stack = Vec::new();
660 let mut iter = self.root.as_ref().iter();
660 let mut iter = self.root.as_ref().iter();
661 std::iter::from_fn(move || {
661 std::iter::from_fn(move || {
662 while let Some(child_node) = iter.next() {
662 while let Some(child_node) = iter.next() {
663 let children = match child_node.children(self.on_disk) {
663 let children = match child_node.children(self.on_disk) {
664 Ok(children) => children,
664 Ok(children) => children,
665 Err(error) => return Some(Err(error)),
665 Err(error) => return Some(Err(error)),
666 };
666 };
667 // Pseudo-recursion
667 // Pseudo-recursion
668 let new_iter = children.iter();
668 let new_iter = children.iter();
669 let old_iter = std::mem::replace(&mut iter, new_iter);
669 let old_iter = std::mem::replace(&mut iter, new_iter);
670 stack.push((child_node, old_iter));
670 stack.push((child_node, old_iter));
671 }
671 }
672 // Found the end of a `children.iter()` iterator.
672 // Found the end of a `children.iter()` iterator.
673 if let Some((child_node, next_iter)) = stack.pop() {
673 if let Some((child_node, next_iter)) = stack.pop() {
674 // "Return" from pseudo-recursion by restoring state from the
674 // "Return" from pseudo-recursion by restoring state from the
675 // explicit stack
675 // explicit stack
676 iter = next_iter;
676 iter = next_iter;
677
677
678 Some(Ok(child_node))
678 Some(Ok(child_node))
679 } else {
679 } else {
680 // Reached the bottom of the stack, we’re done
680 // Reached the bottom of the stack, we’re done
681 None
681 None
682 }
682 }
683 })
683 })
684 }
684 }
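The pseudo-recursion in `iter_nodes` is a general trick: a recursive depth-first traversal becomes an external iterator by keeping the would-be call stack in an explicit `Vec` and driving it with `std::iter::from_fn`. A self-contained sketch of the same shape on a toy tree; as in `iter_nodes`, descent happens first and each node is yielded only after all of its descendants:

    struct Node {
        name: &'static str,
        children: Vec<Node>,
    }

    // Depth-first traversal as an external iterator: `stack` plays the role
    // of the call stack, exactly like the explicit stack in `iter_nodes`.
    fn iter_names(root: &Node) -> impl Iterator<Item = &'static str> + '_ {
        let mut stack = Vec::new();
        let mut iter = root.children.iter();
        std::iter::from_fn(move || {
            // Descend: push each node and switch to iterating its children.
            while let Some(child) = iter.next() {
                let old_iter =
                    std::mem::replace(&mut iter, child.children.iter());
                stack.push((child, old_iter));
            }
            // The current children iterator is exhausted: "return" from the
            // pseudo-recursion by popping the explicit stack.
            let (child, next_iter) = stack.pop()?;
            iter = next_iter;
            Some(child.name)
        })
    }

    fn main() {
        let tree = Node {
            name: "root",
            children: vec![
                Node {
                    name: "a",
                    children: vec![Node { name: "a1", children: vec![] }],
                },
                Node { name: "b", children: vec![] },
            ],
        };
        let order: Vec<_> = iter_names(&tree).collect();
        assert_eq!(order, ["a1", "a", "b"]);
    }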
685
685
686 fn clear_known_ambiguous_mtimes(
686 fn clear_known_ambiguous_mtimes(
687 &mut self,
687 &mut self,
688 paths: &[impl AsRef<HgPath>],
688 paths: &[impl AsRef<HgPath>],
689 ) -> Result<(), DirstateV2ParseError> {
689 ) -> Result<(), DirstateV2ParseError> {
690 for path in paths {
690 for path in paths {
691 if let Some(node) = Self::get_node_mut(
691 if let Some(node) = Self::get_node_mut(
692 self.on_disk,
692 self.on_disk,
693 &mut self.unreachable_bytes,
693 &mut self.unreachable_bytes,
694 &mut self.root,
694 &mut self.root,
695 path.as_ref(),
695 path.as_ref(),
696 )? {
696 )? {
697 if let NodeData::Entry(entry) = &mut node.data {
697 if let NodeData::Entry(entry) = &mut node.data {
698 entry.clear_mtime();
698 entry.clear_mtime();
699 }
699 }
700 }
700 }
701 }
701 }
702 Ok(())
702 Ok(())
703 }
703 }
704
704
705 /// Return a fallible iterator of full paths of nodes that have an
705 /// Return a fallible iterator of full paths of nodes that have an
706 /// `entry` for which the given `predicate` returns true.
706 /// `entry` for which the given `predicate` returns true.
707 ///
707 ///
708 /// Fallibility means that each iterator item is a `Result`, which may
708 /// Fallibility means that each iterator item is a `Result`, which may
709 /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
709 /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
710 /// should only happen if Mercurial is buggy or a repository is corrupted.
710 /// should only happen if Mercurial is buggy or a repository is corrupted.
711 fn filter_full_paths<'tree>(
711 fn filter_full_paths<'tree>(
712 &'tree self,
712 &'tree self,
713 predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
713 predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
714 ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
714 ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
715 {
715 {
716 filter_map_results(self.iter_nodes(), move |node| {
716 filter_map_results(self.iter_nodes(), move |node| {
717 if let Some(entry) = node.entry()? {
717 if let Some(entry) = node.entry()? {
718 if predicate(&entry) {
718 if predicate(&entry) {
719 return Ok(Some(node.full_path(self.on_disk)?));
719 return Ok(Some(node.full_path(self.on_disk)?));
720 }
720 }
721 }
721 }
722 Ok(None)
722 Ok(None)
723 })
723 })
724 }
724 }
725
725
726 fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
726 fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
727 if let Cow::Borrowed(path) = path {
727 if let Cow::Borrowed(path) = path {
728 *unreachable_bytes += path.len() as u32
728 *unreachable_bytes += path.len() as u32
729 }
729 }
730 }
730 }
731 }
731 }
732
732
733 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
733 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
734 ///
734 ///
735 /// The callback is only called for incoming `Ok` values. Errors are passed
735 /// The callback is only called for incoming `Ok` values. Errors are passed
736 /// through as-is. In order to let it use the `?` operator the callback is
736 /// through as-is. In order to let it use the `?` operator the callback is
737 /// expected to return a `Result` of `Option`, instead of an `Option` of
737 /// expected to return a `Result` of `Option`, instead of an `Option` of
738 /// `Result`.
738 /// `Result`.
739 fn filter_map_results<'a, I, F, A, B, E>(
739 fn filter_map_results<'a, I, F, A, B, E>(
740 iter: I,
740 iter: I,
741 f: F,
741 f: F,
742 ) -> impl Iterator<Item = Result<B, E>> + 'a
742 ) -> impl Iterator<Item = Result<B, E>> + 'a
743 where
743 where
744 I: Iterator<Item = Result<A, E>> + 'a,
744 I: Iterator<Item = Result<A, E>> + 'a,
745 F: Fn(A) -> Result<Option<B>, E> + 'a,
745 F: Fn(A) -> Result<Option<B>, E> + 'a,
746 {
746 {
747 iter.filter_map(move |result| match result {
747 iter.filter_map(move |result| match result {
748 Ok(node) => f(node).transpose(),
748 Ok(node) => f(node).transpose(),
749 Err(e) => Some(Err(e)),
749 Err(e) => Some(Err(e)),
750 })
750 })
751 }
751 }
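A quick usage sketch of `filter_map_results`: the helper is restated verbatim so the snippet stands alone, and the error type is just a placeholder `&str`. The callback can both filter (by returning `Ok(None)`) and fail with `?`, while incoming errors pass straight through.

    fn filter_map_results<'a, I, F, A, B, E>(
        iter: I,
        f: F,
    ) -> impl Iterator<Item = Result<B, E>> + 'a
    where
        I: Iterator<Item = Result<A, E>> + 'a,
        F: Fn(A) -> Result<Option<B>, E> + 'a,
    {
        iter.filter_map(move |result| match result {
            Ok(node) => f(node).transpose(),
            Err(e) => Some(Err(e)),
        })
    }

    fn main() {
        let input = vec![Ok(1), Ok(2), Err("parse error"), Ok(4)];
        let output: Vec<Result<i32, &str>> =
            filter_map_results(input.into_iter(), |n| {
                // Keep even numbers (scaled), silently drop odd ones.
                Ok(if n % 2 == 0 { Some(n * 10) } else { None })
            })
            .collect();
        assert_eq!(output, [Ok(20), Err("parse error"), Ok(40)]);
    }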
752
752
753 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
753 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
754 fn clear(&mut self) {
754 fn clear(&mut self) {
755 self.root = Default::default();
755 self.root = Default::default();
756 self.nodes_with_entry_count = 0;
756 self.nodes_with_entry_count = 0;
757 self.nodes_with_copy_source_count = 0;
757 self.nodes_with_copy_source_count = 0;
758 }
758 }
759
759
760 fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry) {
761 let node =
762 self.get_or_insert(&filename).expect("no parse error in v1");
763 node.data = NodeData::Entry(entry);
764 node.children = ChildNodes::default();
765 node.copy_source = None;
766 node.descendants_with_entry_count = 0;
767 node.tracked_descendants_count = 0;
768 }
769
760 fn add_file(
770 fn add_file(
761 &mut self,
771 &mut self,
762 filename: &HgPath,
772 filename: &HgPath,
763 entry: DirstateEntry,
773 entry: DirstateEntry,
764 added: bool,
774 added: bool,
765 merged: bool,
775 merged: bool,
766 from_p2: bool,
776 from_p2: bool,
767 possibly_dirty: bool,
777 possibly_dirty: bool,
768 ) -> Result<(), DirstateError> {
778 ) -> Result<(), DirstateError> {
769 let mut entry = entry;
779 let mut entry = entry;
770 if added {
780 if added {
771 assert!(!possibly_dirty);
781 assert!(!possibly_dirty);
772 assert!(!from_p2);
782 assert!(!from_p2);
773 entry.state = EntryState::Added;
783 entry.state = EntryState::Added;
774 entry.size = SIZE_NON_NORMAL;
784 entry.size = SIZE_NON_NORMAL;
775 entry.mtime = MTIME_UNSET;
785 entry.mtime = MTIME_UNSET;
776 } else if merged {
786 } else if merged {
777 assert!(!possibly_dirty);
787 assert!(!possibly_dirty);
778 assert!(!from_p2);
788 assert!(!from_p2);
779 entry.state = EntryState::Merged;
789 entry.state = EntryState::Merged;
780 entry.size = SIZE_FROM_OTHER_PARENT;
790 entry.size = SIZE_FROM_OTHER_PARENT;
781 entry.mtime = MTIME_UNSET;
791 entry.mtime = MTIME_UNSET;
782 } else if from_p2 {
792 } else if from_p2 {
783 assert!(!possibly_dirty);
793 assert!(!possibly_dirty);
784 entry.state = EntryState::Normal;
794 entry.state = EntryState::Normal;
785 entry.size = SIZE_FROM_OTHER_PARENT;
795 entry.size = SIZE_FROM_OTHER_PARENT;
786 entry.mtime = MTIME_UNSET;
796 entry.mtime = MTIME_UNSET;
787 } else if possibly_dirty {
797 } else if possibly_dirty {
788 entry.state = EntryState::Normal;
798 entry.state = EntryState::Normal;
789 entry.size = SIZE_NON_NORMAL;
799 entry.size = SIZE_NON_NORMAL;
790 entry.mtime = MTIME_UNSET;
800 entry.mtime = MTIME_UNSET;
791 } else {
801 } else {
792 entry.state = EntryState::Normal;
802 entry.state = EntryState::Normal;
793 entry.size = entry.size & V1_RANGEMASK;
803 entry.size = entry.size & V1_RANGEMASK;
794 entry.mtime = entry.mtime & V1_RANGEMASK;
804 entry.mtime = entry.mtime & V1_RANGEMASK;
795 }
805 }
796
806
797 let old_state = match self.get(filename)? {
807 let old_state = match self.get(filename)? {
798 Some(e) => e.state,
808 Some(e) => e.state,
799 None => EntryState::Unknown,
809 None => EntryState::Unknown,
800 };
810 };
801
811
802 Ok(self.add_or_remove_file(filename, old_state, entry)?)
812 Ok(self.add_or_remove_file(filename, old_state, entry)?)
803 }
813 }
804
814
805 fn remove_file(
815 fn remove_file(
806 &mut self,
816 &mut self,
807 filename: &HgPath,
817 filename: &HgPath,
808 in_merge: bool,
818 in_merge: bool,
809 ) -> Result<(), DirstateError> {
819 ) -> Result<(), DirstateError> {
810 let old_entry_opt = self.get(filename)?;
820 let old_entry_opt = self.get(filename)?;
811 let old_state = match old_entry_opt {
821 let old_state = match old_entry_opt {
812 Some(e) => e.state,
822 Some(e) => e.state,
813 None => EntryState::Unknown,
823 None => EntryState::Unknown,
814 };
824 };
815 let mut size = 0;
825 let mut size = 0;
816 if in_merge {
826 if in_merge {
817 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
827 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
818 // during a merge. So I (marmoute) am not sure we need the
828 // during a merge. So I (marmoute) am not sure we need the
819 // conditional at all. Double-checking this with an assert
829 // conditional at all. Double-checking this with an assert
820 // would be nice.
830 // would be nice.
821 if let Some(old_entry) = old_entry_opt {
831 if let Some(old_entry) = old_entry_opt {
822 // back up the previous state
832 // back up the previous state
823 if old_entry.state == EntryState::Merged {
833 if old_entry.state == EntryState::Merged {
824 size = SIZE_NON_NORMAL;
834 size = SIZE_NON_NORMAL;
825 } else if old_entry.state == EntryState::Normal
835 } else if old_entry.state == EntryState::Normal
826 && old_entry.size == SIZE_FROM_OTHER_PARENT
836 && old_entry.size == SIZE_FROM_OTHER_PARENT
827 {
837 {
828 // other parent
838 // other parent
829 size = SIZE_FROM_OTHER_PARENT;
839 size = SIZE_FROM_OTHER_PARENT;
830 }
840 }
831 }
841 }
832 }
842 }
833 if size == 0 {
843 if size == 0 {
834 self.copy_map_remove(filename)?;
844 self.copy_map_remove(filename)?;
835 }
845 }
836 let entry = DirstateEntry {
846 let entry = DirstateEntry {
837 state: EntryState::Removed,
847 state: EntryState::Removed,
838 mode: 0,
848 mode: 0,
839 size,
849 size,
840 mtime: 0,
850 mtime: 0,
841 };
851 };
842 Ok(self.add_or_remove_file(filename, old_state, entry)?)
852 Ok(self.add_or_remove_file(filename, old_state, entry)?)
843 }
853 }
844
854
845 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
855 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
846 let old_state = match self.get(filename)? {
856 let old_state = match self.get(filename)? {
847 Some(e) => e.state,
857 Some(e) => e.state,
848 None => EntryState::Unknown,
858 None => EntryState::Unknown,
849 };
859 };
850 struct Dropped {
860 struct Dropped {
851 was_tracked: bool,
861 was_tracked: bool,
852 had_entry: bool,
862 had_entry: bool,
853 had_copy_source: bool,
863 had_copy_source: bool,
854 }
864 }
855
865
856 /// If this returns `Ok(Some((dropped, removed)))`, then
866 /// If this returns `Ok(Some((dropped, removed)))`, then
857 ///
867 ///
858 /// * `dropped` is about the leaf node that was at `filename`
868 /// * `dropped` is about the leaf node that was at `filename`
859 /// * `removed` is whether this particular level of recursion just
869 /// * `removed` is whether this particular level of recursion just
860 /// removed a node in `nodes`.
870 /// removed a node in `nodes`.
861 fn recur<'on_disk>(
871 fn recur<'on_disk>(
862 on_disk: &'on_disk [u8],
872 on_disk: &'on_disk [u8],
863 unreachable_bytes: &mut u32,
873 unreachable_bytes: &mut u32,
864 nodes: &mut ChildNodes<'on_disk>,
874 nodes: &mut ChildNodes<'on_disk>,
865 path: &HgPath,
875 path: &HgPath,
866 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
876 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
867 let (first_path_component, rest_of_path) =
877 let (first_path_component, rest_of_path) =
868 path.split_first_component();
878 path.split_first_component();
869 let nodes = nodes.make_mut(on_disk, unreachable_bytes)?;
879 let nodes = nodes.make_mut(on_disk, unreachable_bytes)?;
870 let node = if let Some(node) = nodes.get_mut(first_path_component)
880 let node = if let Some(node) = nodes.get_mut(first_path_component)
871 {
881 {
872 node
882 node
873 } else {
883 } else {
874 return Ok(None);
884 return Ok(None);
875 };
885 };
876 let dropped;
886 let dropped;
877 if let Some(rest) = rest_of_path {
887 if let Some(rest) = rest_of_path {
878 if let Some((d, removed)) = recur(
888 if let Some((d, removed)) = recur(
879 on_disk,
889 on_disk,
880 unreachable_bytes,
890 unreachable_bytes,
881 &mut node.children,
891 &mut node.children,
882 rest,
892 rest,
883 )? {
893 )? {
884 dropped = d;
894 dropped = d;
885 if dropped.had_entry {
895 if dropped.had_entry {
886 node.descendants_with_entry_count -= 1;
896 node.descendants_with_entry_count -= 1;
887 }
897 }
888 if dropped.was_tracked {
898 if dropped.was_tracked {
889 node.tracked_descendants_count -= 1;
899 node.tracked_descendants_count -= 1;
890 }
900 }
891
901
892 // Directory caches must be invalidated when removing a
902 // Directory caches must be invalidated when removing a
893 // child node
903 // child node
894 if removed {
904 if removed {
895 if let NodeData::CachedDirectory { .. } = &node.data {
905 if let NodeData::CachedDirectory { .. } = &node.data {
896 node.data = NodeData::None
906 node.data = NodeData::None
897 }
907 }
898 }
908 }
899 } else {
909 } else {
900 return Ok(None);
910 return Ok(None);
901 }
911 }
902 } else {
912 } else {
903 let had_entry = node.data.has_entry();
913 let had_entry = node.data.has_entry();
904 if had_entry {
914 if had_entry {
905 node.data = NodeData::None
915 node.data = NodeData::None
906 }
916 }
907 if let Some(source) = &node.copy_source {
917 if let Some(source) = &node.copy_source {
908 DirstateMap::count_dropped_path(unreachable_bytes, source)
918 DirstateMap::count_dropped_path(unreachable_bytes, source)
909 }
919 }
910 dropped = Dropped {
920 dropped = Dropped {
911 was_tracked: node
921 was_tracked: node
912 .data
922 .data
913 .as_entry()
923 .as_entry()
914 .map_or(false, |entry| entry.state.is_tracked()),
924 .map_or(false, |entry| entry.state.is_tracked()),
915 had_entry,
925 had_entry,
916 had_copy_source: node.copy_source.take().is_some(),
926 had_copy_source: node.copy_source.take().is_some(),
917 };
927 };
918 }
928 }
919 // After recursion, for both leaf (rest_of_path is None) nodes and
929 // After recursion, for both leaf (rest_of_path is None) nodes and
920 // parent nodes, remove a node if it just became empty.
930 // parent nodes, remove a node if it just became empty.
921 let remove = !node.data.has_entry()
931 let remove = !node.data.has_entry()
922 && node.copy_source.is_none()
932 && node.copy_source.is_none()
923 && node.children.is_empty();
933 && node.children.is_empty();
924 if remove {
934 if remove {
925 let (key, _) =
935 let (key, _) =
926 nodes.remove_entry(first_path_component).unwrap();
936 nodes.remove_entry(first_path_component).unwrap();
927 DirstateMap::count_dropped_path(
937 DirstateMap::count_dropped_path(
928 unreachable_bytes,
938 unreachable_bytes,
929 key.full_path(),
939 key.full_path(),
930 )
940 )
931 }
941 }
932 Ok(Some((dropped, remove)))
942 Ok(Some((dropped, remove)))
933 }
943 }
934
944
935 if let Some((dropped, _removed)) = recur(
945 if let Some((dropped, _removed)) = recur(
936 self.on_disk,
946 self.on_disk,
937 &mut self.unreachable_bytes,
947 &mut self.unreachable_bytes,
938 &mut self.root,
948 &mut self.root,
939 filename,
949 filename,
940 )? {
950 )? {
941 if dropped.had_entry {
951 if dropped.had_entry {
942 self.nodes_with_entry_count -= 1
952 self.nodes_with_entry_count -= 1
943 }
953 }
944 if dropped.had_copy_source {
954 if dropped.had_copy_source {
945 self.nodes_with_copy_source_count -= 1
955 self.nodes_with_copy_source_count -= 1
946 }
956 }
947 Ok(dropped.had_entry)
957 Ok(dropped.had_entry)
948 } else {
958 } else {
949 debug_assert!(!old_state.is_tracked());
959 debug_assert!(!old_state.is_tracked());
950 Ok(false)
960 Ok(false)
951 }
961 }
952 }
962 }
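The `recur` helper above does two things on the way back out of the recursion: it reports what was dropped and it prunes any node that just became empty. A toy standalone version of that prune-on-unwind shape, using plain `String`-keyed maps instead of the real tree and entry types:

    use std::collections::HashMap;

    #[derive(Default)]
    struct ToyNode {
        has_entry: bool,
        children: HashMap<String, ToyNode>,
    }

    // Remove the entry at `path` (already split into components), pruning
    // every node that becomes empty on the way back up. Returns whether a
    // real entry was dropped, like `Dropped::had_entry`.
    fn drop_path(nodes: &mut HashMap<String, ToyNode>, path: &[&str]) -> bool {
        let (first, rest) = match path.split_first() {
            Some(split) => split,
            None => return false,
        };
        let node = match nodes.get_mut(*first) {
            Some(node) => node,
            None => return false,
        };
        let had_entry = if rest.is_empty() {
            std::mem::replace(&mut node.has_entry, false)
        } else {
            drop_path(&mut node.children, rest)
        };
        // Prune this level too if it just became empty.
        if !node.has_entry && node.children.is_empty() {
            nodes.remove(*first);
        }
        had_entry
    }

    fn main() {
        let mut root: HashMap<String, ToyNode> = HashMap::new();
        let dir = root.entry("dir".to_string()).or_default();
        dir.children.insert(
            "file".to_string(),
            ToyNode { has_entry: true, children: HashMap::new() },
        );
        assert!(drop_path(&mut root, &["dir", "file"]));
        // Both the leaf and the now-empty "dir" node are gone.
        assert!(root.is_empty());
    }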
953
963
954 fn clear_ambiguous_times(
964 fn clear_ambiguous_times(
955 &mut self,
965 &mut self,
956 filenames: Vec<HgPathBuf>,
966 filenames: Vec<HgPathBuf>,
957 now: i32,
967 now: i32,
958 ) -> Result<(), DirstateV2ParseError> {
968 ) -> Result<(), DirstateV2ParseError> {
959 for filename in filenames {
969 for filename in filenames {
960 if let Some(node) = Self::get_node_mut(
970 if let Some(node) = Self::get_node_mut(
961 self.on_disk,
971 self.on_disk,
962 &mut self.unreachable_bytes,
972 &mut self.unreachable_bytes,
963 &mut self.root,
973 &mut self.root,
964 &filename,
974 &filename,
965 )? {
975 )? {
966 if let NodeData::Entry(entry) = &mut node.data {
976 if let NodeData::Entry(entry) = &mut node.data {
967 entry.clear_ambiguous_mtime(now);
977 entry.clear_ambiguous_mtime(now);
968 }
978 }
969 }
979 }
970 }
980 }
971 Ok(())
981 Ok(())
972 }
982 }
973
983
974 fn non_normal_entries_contains(
984 fn non_normal_entries_contains(
975 &mut self,
985 &mut self,
976 key: &HgPath,
986 key: &HgPath,
977 ) -> Result<bool, DirstateV2ParseError> {
987 ) -> Result<bool, DirstateV2ParseError> {
978 Ok(if let Some(node) = self.get_node(key)? {
988 Ok(if let Some(node) = self.get_node(key)? {
979 node.entry()?.map_or(false, |entry| entry.is_non_normal())
989 node.entry()?.map_or(false, |entry| entry.is_non_normal())
980 } else {
990 } else {
981 false
991 false
982 })
992 })
983 }
993 }
984
994
985 fn non_normal_entries_remove(&mut self, _key: &HgPath) {
995 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool {
996 // Do nothing, this `DirstateMap` does not have a separate "non normal
997 // entries" set that needs to be kept up to date.
998 if let Ok(Some(v)) = self.get(key) {
999 return v.is_non_normal();
1000 }
1001 false
1002 }
1003
1004 fn non_normal_entries_add(&mut self, _key: &HgPath) {
986 // Do nothing, this `DirstateMap` does not have a separate "non normal
1005 // Do nothing, this `DirstateMap` does not have a separate "non normal
987 // entries" set that needs to be kept up to date
1006 // entries" set that needs to be kept up to date
988 }
1007 }
989
1008
990 fn non_normal_or_other_parent_paths(
1009 fn non_normal_or_other_parent_paths(
991 &mut self,
1010 &mut self,
992 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
1011 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
993 {
1012 {
994 Box::new(self.filter_full_paths(|entry| {
1013 Box::new(self.filter_full_paths(|entry| {
995 entry.is_non_normal() || entry.is_from_other_parent()
1014 entry.is_non_normal() || entry.is_from_other_parent()
996 }))
1015 }))
997 }
1016 }
998
1017
999 fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
1018 fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
1000 // Do nothing, this `DirstateMap` does not have a separate "non normal
1019 // Do nothing, this `DirstateMap` does not have a separate "non normal
1001 // entries" and "from other parent" sets that need to be recomputed
1020 // entries" and "from other parent" sets that need to be recomputed
1002 }
1021 }
1003
1022
1004 fn iter_non_normal_paths(
1023 fn iter_non_normal_paths(
1005 &mut self,
1024 &mut self,
1006 ) -> Box<
1025 ) -> Box<
1007 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
1026 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
1008 > {
1027 > {
1009 self.iter_non_normal_paths_panic()
1028 self.iter_non_normal_paths_panic()
1010 }
1029 }
1011
1030
1012 fn iter_non_normal_paths_panic(
1031 fn iter_non_normal_paths_panic(
1013 &self,
1032 &self,
1014 ) -> Box<
1033 ) -> Box<
1015 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
1034 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
1016 > {
1035 > {
1017 Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
1036 Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
1018 }
1037 }
1019
1038
1020 fn iter_other_parent_paths(
1039 fn iter_other_parent_paths(
1021 &mut self,
1040 &mut self,
1022 ) -> Box<
1041 ) -> Box<
1023 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
1042 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
1024 > {
1043 > {
1025 Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
1044 Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
1026 }
1045 }
1027
1046
1028 fn has_tracked_dir(
1047 fn has_tracked_dir(
1029 &mut self,
1048 &mut self,
1030 directory: &HgPath,
1049 directory: &HgPath,
1031 ) -> Result<bool, DirstateError> {
1050 ) -> Result<bool, DirstateError> {
1032 if let Some(node) = self.get_node(directory)? {
1051 if let Some(node) = self.get_node(directory)? {
1033 // A node without a `DirstateEntry` was created to hold child
1052 // A node without a `DirstateEntry` was created to hold child
1034 // nodes, and is therefore a directory.
1053 // nodes, and is therefore a directory.
1035 let state = node.state()?;
1054 let state = node.state()?;
1036 Ok(state.is_none() && node.tracked_descendants_count() > 0)
1055 Ok(state.is_none() && node.tracked_descendants_count() > 0)
1037 } else {
1056 } else {
1038 Ok(false)
1057 Ok(false)
1039 }
1058 }
1040 }
1059 }
1041
1060
1042 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
1061 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
1043 if let Some(node) = self.get_node(directory)? {
1062 if let Some(node) = self.get_node(directory)? {
1044 // A node without a `DirstateEntry` was created to hold child
1063 // A node without a `DirstateEntry` was created to hold child
1045 // nodes, and is therefore a directory.
1064 // nodes, and is therefore a directory.
1046 let state = node.state()?;
1065 let state = node.state()?;
1047 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
1066 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
1048 } else {
1067 } else {
1049 Ok(false)
1068 Ok(false)
1050 }
1069 }
1051 }
1070 }
1052
1071
1053 #[timed]
1072 #[timed]
1054 fn pack_v1(
1073 fn pack_v1(
1055 &mut self,
1074 &mut self,
1056 parents: DirstateParents,
1075 parents: DirstateParents,
1057 now: Timestamp,
1076 now: Timestamp,
1058 ) -> Result<Vec<u8>, DirstateError> {
1077 ) -> Result<Vec<u8>, DirstateError> {
1059 let now: i32 = now.0.try_into().expect("time overflow");
1078 let now: i32 = now.0.try_into().expect("time overflow");
1060 let mut ambiguous_mtimes = Vec::new();
1079 let mut ambiguous_mtimes = Vec::new();
1061 // Optimization (to be measured?): pre-compute size to avoid `Vec`
1080 // Optimization (to be measured?): pre-compute size to avoid `Vec`
1062 // reallocations
1081 // reallocations
1063 let mut size = parents.as_bytes().len();
1082 let mut size = parents.as_bytes().len();
1064 for node in self.iter_nodes() {
1083 for node in self.iter_nodes() {
1065 let node = node?;
1084 let node = node?;
1066 if let Some(entry) = node.entry()? {
1085 if let Some(entry) = node.entry()? {
1067 size += packed_entry_size(
1086 size += packed_entry_size(
1068 node.full_path(self.on_disk)?,
1087 node.full_path(self.on_disk)?,
1069 node.copy_source(self.on_disk)?,
1088 node.copy_source(self.on_disk)?,
1070 );
1089 );
1071 if entry.mtime_is_ambiguous(now) {
1090 if entry.mtime_is_ambiguous(now) {
1072 ambiguous_mtimes.push(
1091 ambiguous_mtimes.push(
1073 node.full_path_borrowed(self.on_disk)?
1092 node.full_path_borrowed(self.on_disk)?
1074 .detach_from_tree(),
1093 .detach_from_tree(),
1075 )
1094 )
1076 }
1095 }
1077 }
1096 }
1078 }
1097 }
1079 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
1098 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
1080
1099
1081 let mut packed = Vec::with_capacity(size);
1100 let mut packed = Vec::with_capacity(size);
1082 packed.extend(parents.as_bytes());
1101 packed.extend(parents.as_bytes());
1083
1102
1084 for node in self.iter_nodes() {
1103 for node in self.iter_nodes() {
1085 let node = node?;
1104 let node = node?;
1086 if let Some(entry) = node.entry()? {
1105 if let Some(entry) = node.entry()? {
1087 pack_entry(
1106 pack_entry(
1088 node.full_path(self.on_disk)?,
1107 node.full_path(self.on_disk)?,
1089 &entry,
1108 &entry,
1090 node.copy_source(self.on_disk)?,
1109 node.copy_source(self.on_disk)?,
1091 &mut packed,
1110 &mut packed,
1092 );
1111 );
1093 }
1112 }
1094 }
1113 }
1095 Ok(packed)
1114 Ok(packed)
1096 }
1115 }
1097
1116
1098 /// Returns new data and metadata together with whether that data should be
1117 /// Returns new data and metadata together with whether that data should be
1099 /// appended to the existing data file whose content is at
1118 /// appended to the existing data file whose content is at
1100 /// `self.on_disk` (true), instead of written to a new data file
1119 /// `self.on_disk` (true), instead of written to a new data file
1101 /// (false).
1120 /// (false).
1102 #[timed]
1121 #[timed]
1103 fn pack_v2(
1122 fn pack_v2(
1104 &mut self,
1123 &mut self,
1105 now: Timestamp,
1124 now: Timestamp,
1106 can_append: bool,
1125 can_append: bool,
1107 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
1126 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
1108 // TODO: how do we want to handle this in 2038?
1127 // TODO: how do we want to handle this in 2038?
1109 let now: i32 = now.0.try_into().expect("time overflow");
1128 let now: i32 = now.0.try_into().expect("time overflow");
1110 let mut paths = Vec::new();
1129 let mut paths = Vec::new();
1111 for node in self.iter_nodes() {
1130 for node in self.iter_nodes() {
1112 let node = node?;
1131 let node = node?;
1113 if let Some(entry) = node.entry()? {
1132 if let Some(entry) = node.entry()? {
1114 if entry.mtime_is_ambiguous(now) {
1133 if entry.mtime_is_ambiguous(now) {
1115 paths.push(
1134 paths.push(
1116 node.full_path_borrowed(self.on_disk)?
1135 node.full_path_borrowed(self.on_disk)?
1117 .detach_from_tree(),
1136 .detach_from_tree(),
1118 )
1137 )
1119 }
1138 }
1120 }
1139 }
1121 }
1140 }
1122 // Borrow of `self` ends here since we collect cloned paths
1141 // Borrow of `self` ends here since we collect cloned paths
1123
1142
1124 self.clear_known_ambiguous_mtimes(&paths)?;
1143 self.clear_known_ambiguous_mtimes(&paths)?;
1125
1144
1126 on_disk::write(self, can_append)
1145 on_disk::write(self, can_append)
1127 }
1146 }
1128
1147
1129 fn status<'a>(
1148 fn status<'a>(
1130 &'a mut self,
1149 &'a mut self,
1131 matcher: &'a (dyn Matcher + Sync),
1150 matcher: &'a (dyn Matcher + Sync),
1132 root_dir: PathBuf,
1151 root_dir: PathBuf,
1133 ignore_files: Vec<PathBuf>,
1152 ignore_files: Vec<PathBuf>,
1134 options: StatusOptions,
1153 options: StatusOptions,
1135 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1154 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1136 {
1155 {
1137 super::status::status(self, matcher, root_dir, ignore_files, options)
1156 super::status::status(self, matcher, root_dir, ignore_files, options)
1138 }
1157 }
1139
1158
1140 fn copy_map_len(&self) -> usize {
1159 fn copy_map_len(&self) -> usize {
1141 self.nodes_with_copy_source_count as usize
1160 self.nodes_with_copy_source_count as usize
1142 }
1161 }
1143
1162
1144 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1163 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1145 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1164 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1146 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1165 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1147 Some((node.full_path(self.on_disk)?, source))
1166 Some((node.full_path(self.on_disk)?, source))
1148 } else {
1167 } else {
1149 None
1168 None
1150 })
1169 })
1151 }))
1170 }))
1152 }
1171 }
1153
1172
1154 fn copy_map_contains_key(
1173 fn copy_map_contains_key(
1155 &self,
1174 &self,
1156 key: &HgPath,
1175 key: &HgPath,
1157 ) -> Result<bool, DirstateV2ParseError> {
1176 ) -> Result<bool, DirstateV2ParseError> {
1158 Ok(if let Some(node) = self.get_node(key)? {
1177 Ok(if let Some(node) = self.get_node(key)? {
1159 node.has_copy_source()
1178 node.has_copy_source()
1160 } else {
1179 } else {
1161 false
1180 false
1162 })
1181 })
1163 }
1182 }
1164
1183
1165 fn copy_map_get(
1184 fn copy_map_get(
1166 &self,
1185 &self,
1167 key: &HgPath,
1186 key: &HgPath,
1168 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1187 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1169 if let Some(node) = self.get_node(key)? {
1188 if let Some(node) = self.get_node(key)? {
1170 if let Some(source) = node.copy_source(self.on_disk)? {
1189 if let Some(source) = node.copy_source(self.on_disk)? {
1171 return Ok(Some(source));
1190 return Ok(Some(source));
1172 }
1191 }
1173 }
1192 }
1174 Ok(None)
1193 Ok(None)
1175 }
1194 }
1176
1195
1177 fn copy_map_remove(
1196 fn copy_map_remove(
1178 &mut self,
1197 &mut self,
1179 key: &HgPath,
1198 key: &HgPath,
1180 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1199 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1181 let count = &mut self.nodes_with_copy_source_count;
1200 let count = &mut self.nodes_with_copy_source_count;
1182 let unreachable_bytes = &mut self.unreachable_bytes;
1201 let unreachable_bytes = &mut self.unreachable_bytes;
1183 Ok(Self::get_node_mut(
1202 Ok(Self::get_node_mut(
1184 self.on_disk,
1203 self.on_disk,
1185 unreachable_bytes,
1204 unreachable_bytes,
1186 &mut self.root,
1205 &mut self.root,
1187 key,
1206 key,
1188 )?
1207 )?
1189 .and_then(|node| {
1208 .and_then(|node| {
1190 if let Some(source) = &node.copy_source {
1209 if let Some(source) = &node.copy_source {
1191 *count -= 1;
1210 *count -= 1;
1192 Self::count_dropped_path(unreachable_bytes, source);
1211 Self::count_dropped_path(unreachable_bytes, source);
1193 }
1212 }
1194 node.copy_source.take().map(Cow::into_owned)
1213 node.copy_source.take().map(Cow::into_owned)
1195 }))
1214 }))
1196 }
1215 }
1197
1216
1198 fn copy_map_insert(
1217 fn copy_map_insert(
1199 &mut self,
1218 &mut self,
1200 key: HgPathBuf,
1219 key: HgPathBuf,
1201 value: HgPathBuf,
1220 value: HgPathBuf,
1202 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1221 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1203 let node = Self::get_or_insert_node(
1222 let node = Self::get_or_insert_node(
1204 self.on_disk,
1223 self.on_disk,
1205 &mut self.unreachable_bytes,
1224 &mut self.unreachable_bytes,
1206 &mut self.root,
1225 &mut self.root,
1207 &key,
1226 &key,
1208 WithBasename::to_cow_owned,
1227 WithBasename::to_cow_owned,
1209 |_ancestor| {},
1228 |_ancestor| {},
1210 )?;
1229 )?;
1211 if node.copy_source.is_none() {
1230 if node.copy_source.is_none() {
1212 self.nodes_with_copy_source_count += 1
1231 self.nodes_with_copy_source_count += 1
1213 }
1232 }
1214 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1233 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1215 }
1234 }
1216
1235
1217 fn len(&self) -> usize {
1236 fn len(&self) -> usize {
1218 self.nodes_with_entry_count as usize
1237 self.nodes_with_entry_count as usize
1219 }
1238 }
1220
1239
1221 fn contains_key(
1240 fn contains_key(
1222 &self,
1241 &self,
1223 key: &HgPath,
1242 key: &HgPath,
1224 ) -> Result<bool, DirstateV2ParseError> {
1243 ) -> Result<bool, DirstateV2ParseError> {
1225 Ok(self.get(key)?.is_some())
1244 Ok(self.get(key)?.is_some())
1226 }
1245 }
1227
1246
1228 fn get(
1247 fn get(
1229 &self,
1248 &self,
1230 key: &HgPath,
1249 key: &HgPath,
1231 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1250 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1232 Ok(if let Some(node) = self.get_node(key)? {
1251 Ok(if let Some(node) = self.get_node(key)? {
1233 node.entry()?
1252 node.entry()?
1234 } else {
1253 } else {
1235 None
1254 None
1236 })
1255 })
1237 }
1256 }
1238
1257
1239 fn iter(&self) -> StateMapIter<'_> {
1258 fn iter(&self) -> StateMapIter<'_> {
1240 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1259 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1241 Ok(if let Some(entry) = node.entry()? {
1260 Ok(if let Some(entry) = node.entry()? {
1242 Some((node.full_path(self.on_disk)?, entry))
1261 Some((node.full_path(self.on_disk)?, entry))
1243 } else {
1262 } else {
1244 None
1263 None
1245 })
1264 })
1246 }))
1265 }))
1247 }
1266 }
1248
1267
1249 fn iter_tracked_dirs(
1268 fn iter_tracked_dirs(
1250 &mut self,
1269 &mut self,
1251 ) -> Result<
1270 ) -> Result<
1252 Box<
1271 Box<
1253 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
1272 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
1254 + Send
1273 + Send
1255 + '_,
1274 + '_,
1256 >,
1275 >,
1257 DirstateError,
1276 DirstateError,
1258 > {
1277 > {
1259 let on_disk = self.on_disk;
1278 let on_disk = self.on_disk;
1260 Ok(Box::new(filter_map_results(
1279 Ok(Box::new(filter_map_results(
1261 self.iter_nodes(),
1280 self.iter_nodes(),
1262 move |node| {
1281 move |node| {
1263 Ok(if node.tracked_descendants_count() > 0 {
1282 Ok(if node.tracked_descendants_count() > 0 {
1264 Some(node.full_path(on_disk)?)
1283 Some(node.full_path(on_disk)?)
1265 } else {
1284 } else {
1266 None
1285 None
1267 })
1286 })
1268 },
1287 },
1269 )))
1288 )))
1270 }
1289 }
1271
1290
1272 fn debug_iter(
1291 fn debug_iter(
1273 &self,
1292 &self,
1274 ) -> Box<
1293 ) -> Box<
1275 dyn Iterator<
1294 dyn Iterator<
1276 Item = Result<
1295 Item = Result<
1277 (&HgPath, (u8, i32, i32, i32)),
1296 (&HgPath, (u8, i32, i32, i32)),
1278 DirstateV2ParseError,
1297 DirstateV2ParseError,
1279 >,
1298 >,
1280 > + Send
1299 > + Send
1281 + '_,
1300 + '_,
1282 > {
1301 > {
1283 Box::new(self.iter_nodes().map(move |node| {
1302 Box::new(self.iter_nodes().map(move |node| {
1284 let node = node?;
1303 let node = node?;
1285 let debug_tuple = if let Some(entry) = node.entry()? {
1304 let debug_tuple = if let Some(entry) = node.entry()? {
1286 entry.debug_tuple()
1305 entry.debug_tuple()
1287 } else if let Some(mtime) = node.cached_directory_mtime() {
1306 } else if let Some(mtime) = node.cached_directory_mtime() {
1288 (b' ', 0, -1, mtime.seconds() as i32)
1307 (b' ', 0, -1, mtime.seconds() as i32)
1289 } else {
1308 } else {
1290 (b' ', 0, -1, -1)
1309 (b' ', 0, -1, -1)
1291 };
1310 };
1292 Ok((node.full_path(self.on_disk)?, debug_tuple))
1311 Ok((node.full_path(self.on_disk)?, debug_tuple))
1293 }))
1312 }))
1294 }
1313 }
1295 }
1314 }
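
Editor's note: that closes the tree `DirstateMap` implementation. One change worth pausing on is `non_normal_entries_remove` now returning a bool: the tree map keeps no cached "non normal" set, so it answers by looking at the entry on demand, while the flat map removes the path from its `HashSet` and reports whether it was present. A minimal, self-contained sketch of that difference, using toy types rather than hg-core's real ones:

use std::collections::{HashMap, HashSet};

struct ToyEntry {
    state: char,
    mtime: i32,
}

impl ToyEntry {
    // Hypothetical criterion standing in for `DirstateEntry::is_non_normal`:
    // anything not cleanly "normal", or with an unset mtime, is non-normal.
    fn is_non_normal(&self) -> bool {
        self.state != 'n' || self.mtime == -1
    }
}

struct TreeLikeMap {
    entries: HashMap<String, ToyEntry>,
}

struct FlatLikeMap {
    non_normal: HashSet<String>,
}

impl TreeLikeMap {
    // No cached set to maintain: inspect the entry and report its status.
    fn non_normal_entries_remove(&mut self, key: &str) -> bool {
        self.entries.get(key).map_or(false, ToyEntry::is_non_normal)
    }
}

impl FlatLikeMap {
    // A real removal from the cached set; `remove` already says whether
    // the key was present.
    fn non_normal_entries_remove(&mut self, key: &str) -> bool {
        self.non_normal.remove(key)
    }
}

fn main() {
    let mut tree = TreeLikeMap { entries: HashMap::new() };
    tree.entries.insert("a.txt".to_owned(), ToyEntry { state: 'n', mtime: -1 });

    let mut flat = FlatLikeMap { non_normal: HashSet::new() };
    flat.non_normal.insert("a.txt".to_owned());

    assert!(tree.non_normal_entries_remove("a.txt"));
    assert!(flat.non_normal_entries_remove("a.txt"));
    assert!(!flat.non_normal_entries_remove("a.txt")); // gone from the set now
}
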
@@ -1,536 +1,556
1 use std::path::PathBuf;
1 use std::path::PathBuf;
2
2
3 use crate::dirstate::parsers::Timestamp;
3 use crate::dirstate::parsers::Timestamp;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
5 use crate::matchers::Matcher;
5 use crate::matchers::Matcher;
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
7 use crate::CopyMapIter;
7 use crate::CopyMapIter;
8 use crate::DirstateEntry;
8 use crate::DirstateEntry;
9 use crate::DirstateError;
9 use crate::DirstateError;
10 use crate::DirstateMap;
10 use crate::DirstateMap;
11 use crate::DirstateParents;
11 use crate::DirstateParents;
12 use crate::DirstateStatus;
12 use crate::DirstateStatus;
13 use crate::PatternFileWarning;
13 use crate::PatternFileWarning;
14 use crate::StateMapIter;
14 use crate::StateMapIter;
15 use crate::StatusError;
15 use crate::StatusError;
16 use crate::StatusOptions;
16 use crate::StatusOptions;
17
17
18 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
18 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
19 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
19 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
20 /// a trait object of this trait. Except for constructors, this trait defines
20 /// a trait object of this trait. Except for constructors, this trait defines
21 /// all APIs that the class needs to interact with its inner dirstate map.
21 /// all APIs that the class needs to interact with its inner dirstate map.
22 ///
22 ///
23 /// A trait object is used to support two different concrete types:
23 /// A trait object is used to support two different concrete types:
24 ///
24 ///
25 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
25 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
26 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
26 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
27 /// fields.
27 /// fields.
28 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
28 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
29 /// dirstate map" based on a tree data structure with nodes for directories
29 /// dirstate map" based on a tree data structure with nodes for directories
30 /// containing child nodes for their files and sub-directories. This tree
30 /// containing child nodes for their files and sub-directories. This tree
31 /// enables a more efficient algorithm for `hg status`, but its details are
31 /// enables a more efficient algorithm for `hg status`, but its details are
32 /// abstracted in this trait.
32 /// abstracted in this trait.
33 ///
33 ///
34 /// The dirstate map associates paths of files in the working directory to
34 /// The dirstate map associates paths of files in the working directory to
35 /// various information about the state of those files.
35 /// various information about the state of those files.
36 pub trait DirstateMapMethods {
36 pub trait DirstateMapMethods {
37 /// Remove information about all files in this map
37 /// Remove information about all files in this map
38 fn clear(&mut self);
38 fn clear(&mut self);
39
39
40 fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry);
41
40 /// Add or change the information associated with a given file.
42 /// Add or change the information associated with a given file.
41 ///
43 ///
42 /// `old_state` is the state in the entry that `get` would have returned
44 /// `old_state` is the state in the entry that `get` would have returned
43 /// before this call, or `EntryState::Unknown` if there was no such entry.
45 /// before this call, or `EntryState::Unknown` if there was no such entry.
44 ///
46 ///
45 /// `entry.state` should never be `EntryState::Unknown`.
47 /// `entry.state` should never be `EntryState::Unknown`.
46 fn add_file(
48 fn add_file(
47 &mut self,
49 &mut self,
48 filename: &HgPath,
50 filename: &HgPath,
49 entry: DirstateEntry,
51 entry: DirstateEntry,
50 added: bool,
52 added: bool,
51 merged: bool,
53 merged: bool,
52 from_p2: bool,
54 from_p2: bool,
53 possibly_dirty: bool,
55 possibly_dirty: bool,
54 ) -> Result<(), DirstateError>;
56 ) -> Result<(), DirstateError>;
55
57
56 /// Mark a file as "removed" (as in `hg rm`).
58 /// Mark a file as "removed" (as in `hg rm`).
57 ///
59 ///
58 /// `old_state` is the state in the entry that `get` would have returned
60 /// `old_state` is the state in the entry that `get` would have returned
59 /// before this call, or `EntryState::Unknown` if there was no such entry.
61 /// before this call, or `EntryState::Unknown` if there was no such entry.
60 ///
62 ///
61 /// `size` is not actually a size but the 0 or -1 or -2 value that would be
63 /// `size` is not actually a size but the 0 or -1 or -2 value that would be
62 /// put in the size field in the dirstate-v1 format.
64 /// put in the size field in the dirstate-v1 format.
63 fn remove_file(
65 fn remove_file(
64 &mut self,
66 &mut self,
65 filename: &HgPath,
67 filename: &HgPath,
66 in_merge: bool,
68 in_merge: bool,
67 ) -> Result<(), DirstateError>;
69 ) -> Result<(), DirstateError>;
68
70
69 /// Drop information about this file from the map if any, and return
71 /// Drop information about this file from the map if any, and return
70 /// whether there was any.
72 /// whether there was any.
71 ///
73 ///
72 /// `get` will now return `None` for this filename.
74 /// `get` will now return `None` for this filename.
73 ///
75 ///
74 /// `old_state` is the state in the entry that `get` would have returned
76 /// `old_state` is the state in the entry that `get` would have returned
75 /// before this call, or `EntryState::Unknown` if there was no such entry.
77 /// before this call, or `EntryState::Unknown` if there was no such entry.
76 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError>;
78 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError>;
77
79
78 /// Among given files, mark the stored `mtime` as ambiguous if there is one
80 /// Among given files, mark the stored `mtime` as ambiguous if there is one
79 /// (if `state == EntryState::Normal`) equal to the given current Unix
81 /// (if `state == EntryState::Normal`) equal to the given current Unix
80 /// timestamp.
82 /// timestamp.
81 fn clear_ambiguous_times(
83 fn clear_ambiguous_times(
82 &mut self,
84 &mut self,
83 filenames: Vec<HgPathBuf>,
85 filenames: Vec<HgPathBuf>,
84 now: i32,
86 now: i32,
85 ) -> Result<(), DirstateV2ParseError>;
87 ) -> Result<(), DirstateV2ParseError>;
86
88
87 /// Return whether the map has a "non-normal" entry for the given
89 /// Return whether the map has a "non-normal" entry for the given
88 /// filename. That is, any entry with a `state` other than
90 /// filename. That is, any entry with a `state` other than
89 /// `EntryState::Normal` or with an ambiguous `mtime`.
91 /// `EntryState::Normal` or with an ambiguous `mtime`.
90 fn non_normal_entries_contains(
92 fn non_normal_entries_contains(
91 &mut self,
93 &mut self,
92 key: &HgPath,
94 key: &HgPath,
93 ) -> Result<bool, DirstateV2ParseError>;
95 ) -> Result<bool, DirstateV2ParseError>;
94
96
95 /// Mark the given path as a "normal" file. This is only relevant in the flat
97 /// Mark the given path as a "normal" file. This is only relevant in the flat
96 /// dirstate map where there is a separate `HashSet` that needs to be kept
98 /// dirstate map where there is a separate `HashSet` that needs to be kept
97 /// up to date.
99 /// up to date.
98 fn non_normal_entries_remove(&mut self, key: &HgPath);
100 /// Returns whether the key was present in the set.
101 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool;
102
103 /// Mark the given path as a "non-normal" file.
104 /// This is only relevant in the flat dirstate map where there is a
105 /// separate `HashSet` that needs to be kept up to date.
106 fn non_normal_entries_add(&mut self, key: &HgPath);
99
107
100 /// Return an iterator of paths whose respective entries are either
108 /// Return an iterator of paths whose respective entries are either
101 /// "non-normal" (see `non_normal_entries_contains`) or "from other
109 /// "non-normal" (see `non_normal_entries_contains`) or "from other
102 /// parent".
110 /// parent".
103 ///
111 ///
104 /// If that information is cached, create the cache as needed.
112 /// If that information is cached, create the cache as needed.
105 ///
113 ///
106 /// "From other parent" is defined as `state == Normal && size == -2`.
114 /// "From other parent" is defined as `state == Normal && size == -2`.
107 ///
115 ///
108 /// Because parse errors can happen during iteration, the iterated items
116 /// Because parse errors can happen during iteration, the iterated items
109 /// are `Result`s.
117 /// are `Result`s.
110 fn non_normal_or_other_parent_paths(
118 fn non_normal_or_other_parent_paths(
111 &mut self,
119 &mut self,
112 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
120 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
113
121
114 /// Create the cache for `non_normal_or_other_parent_paths` if needed.
122 /// Create the cache for `non_normal_or_other_parent_paths` if needed.
115 ///
123 ///
116 /// If `force` is true, the cache is re-created even if it already exists.
124 /// If `force` is true, the cache is re-created even if it already exists.
117 fn set_non_normal_other_parent_entries(&mut self, force: bool);
125 fn set_non_normal_other_parent_entries(&mut self, force: bool);
118
126
119 /// Return an iterator of paths whose respective entries are "non-normal"
127 /// Return an iterator of paths whose respective entries are "non-normal"
120 /// (see `non_normal_entries_contains`).
128 /// (see `non_normal_entries_contains`).
121 ///
129 ///
122 /// If that information is cached, create the cache as needed.
130 /// If that information is cached, create the cache as needed.
123 ///
131 ///
124 /// Because parse errors can happen during iteration, the iterated items
132 /// Because parse errors can happen during iteration, the iterated items
125 /// are `Result`s.
133 /// are `Result`s.
126 fn iter_non_normal_paths(
134 fn iter_non_normal_paths(
127 &mut self,
135 &mut self,
128 ) -> Box<
136 ) -> Box<
129 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
137 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
130 >;
138 >;
131
139
132 /// Same as `iter_non_normal_paths`, but takes `&self` instead of `&mut
140 /// Same as `iter_non_normal_paths`, but takes `&self` instead of `&mut
133 /// self`.
141 /// self`.
134 ///
142 ///
135 /// Panics if a cache is necessary but does not exist yet.
143 /// Panics if a cache is necessary but does not exist yet.
136 fn iter_non_normal_paths_panic(
144 fn iter_non_normal_paths_panic(
137 &self,
145 &self,
138 ) -> Box<
146 ) -> Box<
139 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
147 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
140 >;
148 >;
141
149
142 /// Return an iterator of paths whose respective entries are "from other
150 /// Return an iterator of paths whose respective entries are "from other
143 /// parent".
151 /// parent".
144 ///
152 ///
145 /// If that information is cached, create the cache as needed.
153 /// If that information is cached, create the cache as needed.
146 ///
154 ///
147 /// "From other parent" is defined as `state == Normal && size == -2`.
155 /// "From other parent" is defined as `state == Normal && size == -2`.
148 ///
156 ///
149 /// Because parse errors can happen during iteration, the iterated items
157 /// Because parse errors can happen during iteration, the iterated items
150 /// are `Result`s.
158 /// are `Result`s.
151 fn iter_other_parent_paths(
159 fn iter_other_parent_paths(
152 &mut self,
160 &mut self,
153 ) -> Box<
161 ) -> Box<
154 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
162 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
155 >;
163 >;
156
164
157 /// Returns whether the sub-tree rooted at the given directory contains any
165 /// Returns whether the sub-tree rooted at the given directory contains any
158 /// tracked file.
166 /// tracked file.
159 ///
167 ///
160 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
168 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
161 fn has_tracked_dir(
169 fn has_tracked_dir(
162 &mut self,
170 &mut self,
163 directory: &HgPath,
171 directory: &HgPath,
164 ) -> Result<bool, DirstateError>;
172 ) -> Result<bool, DirstateError>;
165
173
166 /// Returns whether the sub-tree rooted at the given directory contains any
174 /// Returns whether the sub-tree rooted at the given directory contains any
167 /// file with a dirstate entry.
175 /// file with a dirstate entry.
168 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
176 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
169
177
170 /// Clear mtimes that are ambiguous with `now` (similar to
178 /// Clear mtimes that are ambiguous with `now` (similar to
171 /// `clear_ambiguous_times` but for all files in the dirstate map), and
179 /// `clear_ambiguous_times` but for all files in the dirstate map), and
172 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v1
180 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v1
173 /// format.
181 /// format.
174 fn pack_v1(
182 fn pack_v1(
175 &mut self,
183 &mut self,
176 parents: DirstateParents,
184 parents: DirstateParents,
177 now: Timestamp,
185 now: Timestamp,
178 ) -> Result<Vec<u8>, DirstateError>;
186 ) -> Result<Vec<u8>, DirstateError>;
179
187
180 /// Clear mtimes that are ambiguous with `now` (similar to
188 /// Clear mtimes that are ambiguous with `now` (similar to
181 /// `clear_ambiguous_times` but for all files in the dirstate map), and
189 /// `clear_ambiguous_times` but for all files in the dirstate map), and
182 /// serialize bytes to write a dirstate data file to disk in dirstate-v2
190 /// serialize bytes to write a dirstate data file to disk in dirstate-v2
183 /// format.
191 /// format.
184 ///
192 ///
185 /// Returns new data and metadata together with whether that data should be
193 /// Returns new data and metadata together with whether that data should be
186 /// appended to the existing data file whose content is at
194 /// appended to the existing data file whose content is at
187 /// `self.on_disk` (true), instead of written to a new data file
195 /// `self.on_disk` (true), instead of written to a new data file
188 /// (false).
196 /// (false).
189 ///
197 ///
190 /// Note: this is only supported by the tree dirstate map.
198 /// Note: this is only supported by the tree dirstate map.
191 fn pack_v2(
199 fn pack_v2(
192 &mut self,
200 &mut self,
193 now: Timestamp,
201 now: Timestamp,
194 can_append: bool,
202 can_append: bool,
195 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError>;
203 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError>;
196
204
197 /// Run the status algorithm.
205 /// Run the status algorithm.
198 ///
206 ///
199 /// This is not semantically a method of the dirstate map, but a different
207 /// This is not semantically a method of the dirstate map, but a different
200 /// algorithm is used for the flat vs. tree dirstate map, so having it in
208 /// algorithm is used for the flat vs. tree dirstate map, so having it in
201 /// this trait enables the same dynamic dispatch as with other methods.
209 /// this trait enables the same dynamic dispatch as with other methods.
202 fn status<'a>(
210 fn status<'a>(
203 &'a mut self,
211 &'a mut self,
204 matcher: &'a (dyn Matcher + Sync),
212 matcher: &'a (dyn Matcher + Sync),
205 root_dir: PathBuf,
213 root_dir: PathBuf,
206 ignore_files: Vec<PathBuf>,
214 ignore_files: Vec<PathBuf>,
207 options: StatusOptions,
215 options: StatusOptions,
208 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
216 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
209
217
210 /// Returns how many files in the dirstate map have a recorded copy source.
218 /// Returns how many files in the dirstate map have a recorded copy source.
211 fn copy_map_len(&self) -> usize;
219 fn copy_map_len(&self) -> usize;
212
220
213 /// Returns an iterator of `(path, copy_source)` for all files that have a
221 /// Returns an iterator of `(path, copy_source)` for all files that have a
214 /// copy source.
222 /// copy source.
215 fn copy_map_iter(&self) -> CopyMapIter<'_>;
223 fn copy_map_iter(&self) -> CopyMapIter<'_>;
216
224
217 /// Returns whether the given file has a copy source.
225 /// Returns whether the given file has a copy source.
218 fn copy_map_contains_key(
226 fn copy_map_contains_key(
219 &self,
227 &self,
220 key: &HgPath,
228 key: &HgPath,
221 ) -> Result<bool, DirstateV2ParseError>;
229 ) -> Result<bool, DirstateV2ParseError>;
222
230
223 /// Returns the copy source for the given file.
231 /// Returns the copy source for the given file.
224 fn copy_map_get(
232 fn copy_map_get(
225 &self,
233 &self,
226 key: &HgPath,
234 key: &HgPath,
227 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
235 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
228
236
229 /// Removes the recorded copy source if any for the given file, and returns
237 /// Removes the recorded copy source if any for the given file, and returns
230 /// it.
238 /// it.
231 fn copy_map_remove(
239 fn copy_map_remove(
232 &mut self,
240 &mut self,
233 key: &HgPath,
241 key: &HgPath,
234 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
242 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
235
243
236 /// Set the given `value` as the copy source for the given `key` file.
244 /// Set the given `value` as the copy source for the given `key` file.
237 fn copy_map_insert(
245 fn copy_map_insert(
238 &mut self,
246 &mut self,
239 key: HgPathBuf,
247 key: HgPathBuf,
240 value: HgPathBuf,
248 value: HgPathBuf,
241 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
249 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
242
250
243 /// Returns the number of files that have an entry.
251 /// Returns the number of files that have an entry.
244 fn len(&self) -> usize;
252 fn len(&self) -> usize;
245
253
246 /// Returns whether the given file has an entry.
254 /// Returns whether the given file has an entry.
247 fn contains_key(&self, key: &HgPath)
255 fn contains_key(&self, key: &HgPath)
248 -> Result<bool, DirstateV2ParseError>;
256 -> Result<bool, DirstateV2ParseError>;
249
257
250 /// Returns the entry, if any, for the given file.
258 /// Returns the entry, if any, for the given file.
251 fn get(
259 fn get(
252 &self,
260 &self,
253 key: &HgPath,
261 key: &HgPath,
254 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
262 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
255
263
256 /// Returns a `(path, entry)` iterator of files that have an entry.
264 /// Returns a `(path, entry)` iterator of files that have an entry.
257 ///
265 ///
258 /// Because parse errors can happen during iteration, the iterated items
266 /// Because parse errors can happen during iteration, the iterated items
259 /// are `Result`s.
267 /// are `Result`s.
260 fn iter(&self) -> StateMapIter<'_>;
268 fn iter(&self) -> StateMapIter<'_>;
261
269
262 /// Returns an iterator of tracked directories.
270 /// Returns an iterator of tracked directories.
263 ///
271 ///
264 /// These are the paths for which `has_tracked_dir` would return true.
272 /// These are the paths for which `has_tracked_dir` would return true.
265 /// Or, in other words, the union of ancestor paths of all paths that have
273 /// Or, in other words, the union of ancestor paths of all paths that have
266 /// an associated entry in a "tracked" state in this dirstate map.
274 /// an associated entry in a "tracked" state in this dirstate map.
267 ///
275 ///
268 /// Because parse errors can happen during iteration, the iterated items
276 /// Because parse errors can happen during iteration, the iterated items
269 /// are `Result`s.
277 /// are `Result`s.
270 fn iter_tracked_dirs(
278 fn iter_tracked_dirs(
271 &mut self,
279 &mut self,
272 ) -> Result<
280 ) -> Result<
273 Box<
281 Box<
274 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
282 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
275 + Send
283 + Send
276 + '_,
284 + '_,
277 >,
285 >,
278 DirstateError,
286 DirstateError,
279 >;
287 >;
280
288
281 /// Return an iterator of `(path, (state, mode, size, mtime))` for every
289 /// Return an iterator of `(path, (state, mode, size, mtime))` for every
282 /// node stored in this dirstate map, for the purpose of the `hg
290 /// node stored in this dirstate map, for the purpose of the `hg
283 /// debugdirstate` command.
291 /// debugdirstate` command.
284 ///
292 ///
285 /// For nodes that don’t have an entry, `state` is the ASCII space.
293 /// For nodes that don’t have an entry, `state` is the ASCII space.
286 /// An `mtime` may still be present. It is used to optimize `status`.
294 /// An `mtime` may still be present. It is used to optimize `status`.
287 ///
295 ///
288 /// Because parse errors can happen during iteration, the iterated items
296 /// Because parse errors can happen during iteration, the iterated items
289 /// are `Result`s.
297 /// are `Result`s.
290 fn debug_iter(
298 fn debug_iter(
291 &self,
299 &self,
292 ) -> Box<
300 ) -> Box<
293 dyn Iterator<
301 dyn Iterator<
294 Item = Result<
302 Item = Result<
295 (&HgPath, (u8, i32, i32, i32)),
303 (&HgPath, (u8, i32, i32, i32)),
296 DirstateV2ParseError,
304 DirstateV2ParseError,
297 >,
305 >,
298 > + Send
306 > + Send
299 + '_,
307 + '_,
300 >;
308 >;
301 }
309 }
302
310
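
Editor's note: the trait defined above is consumed as a `Box<dyn DirstateMapMethods + Send>` trait object so that the Python-facing class can hold either concrete backend. A standalone sketch of that dispatch pattern with a toy trait (not the real hg-core API):

use std::collections::HashMap;

// Toy stand-in for `DirstateMapMethods`; only two methods for brevity.
trait MapMethods: Send {
    fn len(&self) -> usize;
    fn describe(&self) -> &'static str;
}

struct FlatMap {
    entries: HashMap<String, u32>,
}

struct TreeMap {
    nodes_with_entry_count: u32,
}

impl MapMethods for FlatMap {
    fn len(&self) -> usize {
        self.entries.len()
    }
    fn describe(&self) -> &'static str {
        "flat (HashMap-based)"
    }
}

impl MapMethods for TreeMap {
    fn len(&self) -> usize {
        self.nodes_with_entry_count as usize
    }
    fn describe(&self) -> &'static str {
        "tree (node-based)"
    }
}

// Callers only see the trait object, much as the Python wrapper only sees
// `Box<dyn DirstateMapMethods + Send>` regardless of which backend was built.
fn report(map: &dyn MapMethods) -> String {
    format!("{} map with {} entries", map.describe(), map.len())
}

fn main() {
    let use_tree = true;
    let inner: Box<dyn MapMethods> = if use_tree {
        Box::new(TreeMap { nodes_with_entry_count: 0 })
    } else {
        Box::new(FlatMap { entries: HashMap::new() })
    };
    println!("{}", report(&*inner));
}
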
303 impl DirstateMapMethods for DirstateMap {
311 impl DirstateMapMethods for DirstateMap {
304 fn clear(&mut self) {
312 fn clear(&mut self) {
305 self.clear()
313 self.clear()
306 }
314 }
307
315
316 /// Used to set a value directly.
317 ///
318 /// XXX Is temporary during a refactor of V1 dirstate and will disappear
319 /// shortly.
320 fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry) {
321 self.set_v1_inner(&filename, entry)
322 }
323
308 fn add_file(
324 fn add_file(
309 &mut self,
325 &mut self,
310 filename: &HgPath,
326 filename: &HgPath,
311 entry: DirstateEntry,
327 entry: DirstateEntry,
312 added: bool,
328 added: bool,
313 merged: bool,
329 merged: bool,
314 from_p2: bool,
330 from_p2: bool,
315 possibly_dirty: bool,
331 possibly_dirty: bool,
316 ) -> Result<(), DirstateError> {
332 ) -> Result<(), DirstateError> {
317 self.add_file(filename, entry, added, merged, from_p2, possibly_dirty)
333 self.add_file(filename, entry, added, merged, from_p2, possibly_dirty)
318 }
334 }
319
335
320 fn remove_file(
336 fn remove_file(
321 &mut self,
337 &mut self,
322 filename: &HgPath,
338 filename: &HgPath,
323 in_merge: bool,
339 in_merge: bool,
324 ) -> Result<(), DirstateError> {
340 ) -> Result<(), DirstateError> {
325 self.remove_file(filename, in_merge)
341 self.remove_file(filename, in_merge)
326 }
342 }
327
343
328 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
344 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
329 self.drop_file(filename)
345 self.drop_file(filename)
330 }
346 }
331
347
332 fn clear_ambiguous_times(
348 fn clear_ambiguous_times(
333 &mut self,
349 &mut self,
334 filenames: Vec<HgPathBuf>,
350 filenames: Vec<HgPathBuf>,
335 now: i32,
351 now: i32,
336 ) -> Result<(), DirstateV2ParseError> {
352 ) -> Result<(), DirstateV2ParseError> {
337 Ok(self.clear_ambiguous_times(filenames, now))
353 Ok(self.clear_ambiguous_times(filenames, now))
338 }
354 }
339
355
340 fn non_normal_entries_contains(
356 fn non_normal_entries_contains(
341 &mut self,
357 &mut self,
342 key: &HgPath,
358 key: &HgPath,
343 ) -> Result<bool, DirstateV2ParseError> {
359 ) -> Result<bool, DirstateV2ParseError> {
344 let (non_normal, _other_parent) =
360 let (non_normal, _other_parent) =
345 self.get_non_normal_other_parent_entries();
361 self.get_non_normal_other_parent_entries();
346 Ok(non_normal.contains(key))
362 Ok(non_normal.contains(key))
347 }
363 }
348
364
349 fn non_normal_entries_remove(&mut self, key: &HgPath) {
365 fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool {
350 self.non_normal_entries_remove(key)
366 self.non_normal_entries_remove(key)
351 }
367 }
352
368
369 fn non_normal_entries_add(&mut self, key: &HgPath) {
370 self.non_normal_entries_add(key)
371 }
372
353 fn non_normal_or_other_parent_paths(
373 fn non_normal_or_other_parent_paths(
354 &mut self,
374 &mut self,
355 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
375 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
356 {
376 {
357 let (non_normal, other_parent) =
377 let (non_normal, other_parent) =
358 self.get_non_normal_other_parent_entries();
378 self.get_non_normal_other_parent_entries();
359 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
379 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
360 }
380 }
361
381
362 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
382 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
363 self.set_non_normal_other_parent_entries(force)
383 self.set_non_normal_other_parent_entries(force)
364 }
384 }
365
385
366 fn iter_non_normal_paths(
386 fn iter_non_normal_paths(
367 &mut self,
387 &mut self,
368 ) -> Box<
388 ) -> Box<
369 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
389 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
370 > {
390 > {
371 let (non_normal, _other_parent) =
391 let (non_normal, _other_parent) =
372 self.get_non_normal_other_parent_entries();
392 self.get_non_normal_other_parent_entries();
373 Box::new(non_normal.iter().map(|p| Ok(&**p)))
393 Box::new(non_normal.iter().map(|p| Ok(&**p)))
374 }
394 }
375
395
376 fn iter_non_normal_paths_panic(
396 fn iter_non_normal_paths_panic(
377 &self,
397 &self,
378 ) -> Box<
398 ) -> Box<
379 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
399 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
380 > {
400 > {
381 let (non_normal, _other_parent) =
401 let (non_normal, _other_parent) =
382 self.get_non_normal_other_parent_entries_panic();
402 self.get_non_normal_other_parent_entries_panic();
383 Box::new(non_normal.iter().map(|p| Ok(&**p)))
403 Box::new(non_normal.iter().map(|p| Ok(&**p)))
384 }
404 }
385
405
386 fn iter_other_parent_paths(
406 fn iter_other_parent_paths(
387 &mut self,
407 &mut self,
388 ) -> Box<
408 ) -> Box<
389 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
409 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
390 > {
410 > {
391 let (_non_normal, other_parent) =
411 let (_non_normal, other_parent) =
392 self.get_non_normal_other_parent_entries();
412 self.get_non_normal_other_parent_entries();
393 Box::new(other_parent.iter().map(|p| Ok(&**p)))
413 Box::new(other_parent.iter().map(|p| Ok(&**p)))
394 }
414 }
395
415
396 fn has_tracked_dir(
416 fn has_tracked_dir(
397 &mut self,
417 &mut self,
398 directory: &HgPath,
418 directory: &HgPath,
399 ) -> Result<bool, DirstateError> {
419 ) -> Result<bool, DirstateError> {
400 self.has_tracked_dir(directory)
420 self.has_tracked_dir(directory)
401 }
421 }
402
422
403 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
423 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
404 self.has_dir(directory)
424 self.has_dir(directory)
405 }
425 }
406
426
407 fn pack_v1(
427 fn pack_v1(
408 &mut self,
428 &mut self,
409 parents: DirstateParents,
429 parents: DirstateParents,
410 now: Timestamp,
430 now: Timestamp,
411 ) -> Result<Vec<u8>, DirstateError> {
431 ) -> Result<Vec<u8>, DirstateError> {
412 self.pack(parents, now)
432 self.pack(parents, now)
413 }
433 }
414
434
415 fn pack_v2(
435 fn pack_v2(
416 &mut self,
436 &mut self,
417 _now: Timestamp,
437 _now: Timestamp,
418 _can_append: bool,
438 _can_append: bool,
419 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
439 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
420 panic!(
440 panic!(
421 "should have used dirstate_tree::DirstateMap to use the v2 format"
441 "should have used dirstate_tree::DirstateMap to use the v2 format"
422 )
442 )
423 }
443 }
424
444
425 fn status<'a>(
445 fn status<'a>(
426 &'a mut self,
446 &'a mut self,
427 matcher: &'a (dyn Matcher + Sync),
447 matcher: &'a (dyn Matcher + Sync),
428 root_dir: PathBuf,
448 root_dir: PathBuf,
429 ignore_files: Vec<PathBuf>,
449 ignore_files: Vec<PathBuf>,
430 options: StatusOptions,
450 options: StatusOptions,
431 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
451 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
432 {
452 {
433 crate::status(self, matcher, root_dir, ignore_files, options)
453 crate::status(self, matcher, root_dir, ignore_files, options)
434 }
454 }
435
455
436 fn copy_map_len(&self) -> usize {
456 fn copy_map_len(&self) -> usize {
437 self.copy_map.len()
457 self.copy_map.len()
438 }
458 }
439
459
440 fn copy_map_iter(&self) -> CopyMapIter<'_> {
460 fn copy_map_iter(&self) -> CopyMapIter<'_> {
441 Box::new(
461 Box::new(
442 self.copy_map
462 self.copy_map
443 .iter()
463 .iter()
444 .map(|(key, value)| Ok((&**key, &**value))),
464 .map(|(key, value)| Ok((&**key, &**value))),
445 )
465 )
446 }
466 }
447
467
448 fn copy_map_contains_key(
468 fn copy_map_contains_key(
449 &self,
469 &self,
450 key: &HgPath,
470 key: &HgPath,
451 ) -> Result<bool, DirstateV2ParseError> {
471 ) -> Result<bool, DirstateV2ParseError> {
452 Ok(self.copy_map.contains_key(key))
472 Ok(self.copy_map.contains_key(key))
453 }
473 }
454
474
455 fn copy_map_get(
475 fn copy_map_get(
456 &self,
476 &self,
457 key: &HgPath,
477 key: &HgPath,
458 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
478 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
459 Ok(self.copy_map.get(key).map(|p| &**p))
479 Ok(self.copy_map.get(key).map(|p| &**p))
460 }
480 }
461
481
462 fn copy_map_remove(
482 fn copy_map_remove(
463 &mut self,
483 &mut self,
464 key: &HgPath,
484 key: &HgPath,
465 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
485 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
466 Ok(self.copy_map.remove(key))
486 Ok(self.copy_map.remove(key))
467 }
487 }
468
488
469 fn copy_map_insert(
489 fn copy_map_insert(
470 &mut self,
490 &mut self,
471 key: HgPathBuf,
491 key: HgPathBuf,
472 value: HgPathBuf,
492 value: HgPathBuf,
473 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
493 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
474 Ok(self.copy_map.insert(key, value))
494 Ok(self.copy_map.insert(key, value))
475 }
495 }
476
496
477 fn len(&self) -> usize {
497 fn len(&self) -> usize {
478 (&**self).len()
498 (&**self).len()
479 }
499 }
480
500
481 fn contains_key(
501 fn contains_key(
482 &self,
502 &self,
483 key: &HgPath,
503 key: &HgPath,
484 ) -> Result<bool, DirstateV2ParseError> {
504 ) -> Result<bool, DirstateV2ParseError> {
485 Ok((&**self).contains_key(key))
505 Ok((&**self).contains_key(key))
486 }
506 }
487
507
488 fn get(
508 fn get(
489 &self,
509 &self,
490 key: &HgPath,
510 key: &HgPath,
491 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
511 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
492 Ok((&**self).get(key).cloned())
512 Ok((&**self).get(key).cloned())
493 }
513 }
494
514
495 fn iter(&self) -> StateMapIter<'_> {
515 fn iter(&self) -> StateMapIter<'_> {
496 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
516 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
497 }
517 }
498
518
499 fn iter_tracked_dirs(
519 fn iter_tracked_dirs(
500 &mut self,
520 &mut self,
501 ) -> Result<
521 ) -> Result<
502 Box<
522 Box<
503 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
523 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
504 + Send
524 + Send
505 + '_,
525 + '_,
506 >,
526 >,
507 DirstateError,
527 DirstateError,
508 > {
528 > {
509 self.set_all_dirs()?;
529 self.set_all_dirs()?;
510 Ok(Box::new(
530 Ok(Box::new(
511 self.all_dirs
531 self.all_dirs
512 .as_ref()
532 .as_ref()
513 .unwrap()
533 .unwrap()
514 .iter()
534 .iter()
515 .map(|path| Ok(&**path)),
535 .map(|path| Ok(&**path)),
516 ))
536 ))
517 }
537 }
518
538
519 fn debug_iter(
539 fn debug_iter(
520 &self,
540 &self,
521 ) -> Box<
541 ) -> Box<
522 dyn Iterator<
542 dyn Iterator<
523 Item = Result<
543 Item = Result<
524 (&HgPath, (u8, i32, i32, i32)),
544 (&HgPath, (u8, i32, i32, i32)),
525 DirstateV2ParseError,
545 DirstateV2ParseError,
526 >,
546 >,
527 > + Send
547 > + Send
528 + '_,
548 + '_,
529 > {
549 > {
530 Box::new(
550 Box::new(
531 (&**self)
551 (&**self)
532 .iter()
552 .iter()
533 .map(|(path, entry)| Ok((&**path, entry.debug_tuple()))),
553 .map(|(path, entry)| Ok((&**path, entry.debug_tuple()))),
534 )
554 )
535 }
555 }
536 }
556 }
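
Editor's note before the next file: the `clear_ambiguous_times` and `pack_v1` docs above lean on the idea of an "ambiguous" mtime. The rule, sketched below with hypothetical stand-in types (the constant and structs are not hg-core's): a Normal entry whose recorded mtime equals the current second could be modified again without the mtime changing, so its mtime is reset to an "unset" marker and the next `status` re-checks the file's contents.

// Hypothetical stand-in for hg's MTIME_UNSET marker.
const MTIME_UNSET_SKETCH: i32 = -1;

#[derive(PartialEq)]
enum State {
    Normal,
    Added,
}

struct Entry {
    state: State,
    mtime: i32,
}

impl Entry {
    // A stored mtime equal to "now" cannot be trusted: a later write in the
    // same second would leave it unchanged.
    fn mtime_is_ambiguous(&self, now: i32) -> bool {
        self.state == State::Normal && self.mtime == now
    }

    fn clear_ambiguous_mtime(&mut self, now: i32) {
        if self.mtime_is_ambiguous(now) {
            self.mtime = MTIME_UNSET_SKETCH;
        }
    }
}

fn main() {
    let now = 1_700_000_000;
    let mut touched_this_second = Entry { state: State::Normal, mtime: now };
    let mut older = Entry { state: State::Normal, mtime: now - 10 };
    let mut added = Entry { state: State::Added, mtime: now };

    touched_this_second.clear_ambiguous_mtime(now);
    older.clear_ambiguous_mtime(now);
    added.clear_ambiguous_mtime(now);

    assert_eq!(touched_this_second.mtime, MTIME_UNSET_SKETCH); // re-checked later
    assert_eq!(older.mtime, now - 10); // unambiguous, kept
    assert_eq!(added.mtime, now); // only Normal entries are affected
}
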
@@ -1,629 +1,669
1 // dirstate_map.rs
1 // dirstate_map.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
9 //! `hg-core` package.
9 //! `hg-core` package.
10
10
11 use std::cell::{RefCell, RefMut};
11 use std::cell::{RefCell, RefMut};
12 use std::convert::TryInto;
12 use std::convert::TryInto;
13
13
14 use cpython::{
14 use cpython::{
15 exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
15 exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
16 PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
16 PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
17 UnsafePyLeaked,
17 UnsafePyLeaked,
18 };
18 };
19
19
20 use crate::{
20 use crate::{
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
22 dirstate::make_dirstate_item,
22 dirstate::make_dirstate_item,
23 dirstate::make_dirstate_item_raw,
23 dirstate::make_dirstate_item_raw,
24 dirstate::non_normal_entries::{
24 dirstate::non_normal_entries::{
25 NonNormalEntries, NonNormalEntriesIterator,
25 NonNormalEntries, NonNormalEntriesIterator,
26 },
26 },
27 dirstate::owning::OwningDirstateMap,
27 dirstate::owning::OwningDirstateMap,
28 parsers::dirstate_parents_to_pytuple,
28 parsers::dirstate_parents_to_pytuple,
29 };
29 };
30 use hg::{
30 use hg::{
31 dirstate::parsers::Timestamp,
31 dirstate::parsers::Timestamp,
32 dirstate::MTIME_UNSET,
32 dirstate::MTIME_UNSET,
33 dirstate::SIZE_NON_NORMAL,
33 dirstate::SIZE_NON_NORMAL,
34 dirstate_tree::dispatch::DirstateMapMethods,
34 dirstate_tree::dispatch::DirstateMapMethods,
35 dirstate_tree::on_disk::DirstateV2ParseError,
35 dirstate_tree::on_disk::DirstateV2ParseError,
36 revlog::Node,
36 revlog::Node,
37 utils::files::normalize_case,
37 utils::files::normalize_case,
38 utils::hg_path::{HgPath, HgPathBuf},
38 utils::hg_path::{HgPath, HgPathBuf},
39 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
39 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
40 DirstateParents, EntryState, StateMapIter,
40 DirstateParents, EntryState, StateMapIter,
41 };
41 };
42
42
43 // TODO
43 // TODO
44 // This object needs to share references to multiple members of its Rust
44 // This object needs to share references to multiple members of its Rust
45 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
45 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
46 // Right now `CopyMap` is done, but it needs to have an explicit reference
46 // Right now `CopyMap` is done, but it needs to have an explicit reference
47 // to `RustDirstateMap` which itself needs to have an encapsulation for
47 // to `RustDirstateMap` which itself needs to have an encapsulation for
48 // every method in `CopyMap` (copymapcopy, etc.).
48 // every method in `CopyMap` (copymapcopy, etc.).
49 // This is ugly and hard to maintain.
49 // This is ugly and hard to maintain.
50 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
50 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
51 // `py_class!` is already implemented and does not mention
51 // `py_class!` is already implemented and does not mention
52 // `RustDirstateMap`, rightfully so.
52 // `RustDirstateMap`, rightfully so.
53 // All attributes also have to have a separate refcount data attribute for
// All attributes also have to have a separate refcount data attribute for
// leaks, with all methods that go along for reference sharing.
py_class!(pub class DirstateMap |py| {
    @shared data inner: Box<dyn DirstateMapMethods + Send>;

    /// Returns a `(dirstate_map, parents)` tuple
    @staticmethod
    def new_v1(
        use_dirstate_tree: bool,
        on_disk: PyBytes,
    ) -> PyResult<PyObject> {
        let (inner, parents) = if use_dirstate_tree {
            let (map, parents) = OwningDirstateMap::new_v1(py, on_disk)
                .map_err(|e| dirstate_error(py, e))?;
            (Box::new(map) as _, parents)
        } else {
            let bytes = on_disk.data(py);
            let mut map = RustDirstateMap::default();
            let parents = map.read(bytes).map_err(|e| dirstate_error(py, e))?;
            (Box::new(map) as _, parents)
        };
        let map = Self::create_instance(py, inner)?;
        let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
        Ok((map, parents).to_py_object(py).into_object())
    }

    /// Returns a DirstateMap
    @staticmethod
    def new_v2(
        on_disk: PyBytes,
        data_size: usize,
        tree_metadata: PyBytes,
    ) -> PyResult<PyObject> {
        let dirstate_error = |e: DirstateError| {
            PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
        };
        let inner = OwningDirstateMap::new_v2(
            py, on_disk, data_size, tree_metadata,
        ).map_err(dirstate_error)?;
        let map = Self::create_instance(py, Box::new(inner))?;
        Ok(map.into_object())
    }

    def clear(&self) -> PyResult<PyObject> {
        self.inner(py).borrow_mut().clear();
        Ok(py.None())
    }

    def get(
        &self,
        key: PyObject,
        default: Option<PyObject> = None
    ) -> PyResult<Option<PyObject>> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow()
            .get(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(entry) => {
                Ok(Some(make_dirstate_item(py, &entry)?))
            },
            None => Ok(default)
        }
    }

    def set_v1(&self, path: PyObject, item: PyObject) -> PyResult<PyObject> {
        let f = path.extract::<PyBytes>(py)?;
        let filename = HgPath::new(f.data(py));
        let state = item.getattr(py, "state")?.extract::<PyBytes>(py)?;
        let state = state.data(py)[0];
        let entry = DirstateEntry {
            state: state.try_into().expect("state is always valid"),
            mtime: item.getattr(py, "mtime")?.extract(py)?,
            size: item.getattr(py, "size")?.extract(py)?,
            mode: item.getattr(py, "mode")?.extract(py)?,
        };
        self.inner(py).borrow_mut().set_v1(filename, entry);
        Ok(py.None())
    }

    def addfile(
        &self,
        f: PyObject,
        mode: PyObject,
        size: PyObject,
        mtime: PyObject,
        added: PyObject,
        merged: PyObject,
        from_p2: PyObject,
        possibly_dirty: PyObject,
    ) -> PyResult<PyObject> {
        let f = f.extract::<PyBytes>(py)?;
        let filename = HgPath::new(f.data(py));
        let mode = if mode.is_none(py) {
            // fallback default value
            0
        } else {
            mode.extract(py)?
        };
        let size = if size.is_none(py) {
            // fallback default value
            SIZE_NON_NORMAL
        } else {
            size.extract(py)?
        };
        let mtime = if mtime.is_none(py) {
            // fallback default value
            MTIME_UNSET
        } else {
            mtime.extract(py)?
        };
        let entry = DirstateEntry {
            // XXX Arbitrary default value since the value is determined later
            state: EntryState::Normal,
            mode: mode,
            size: size,
            mtime: mtime,
        };
        let added = added.extract::<PyBool>(py)?.is_true();
        let merged = merged.extract::<PyBool>(py)?.is_true();
        let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
        let possibly_dirty = possibly_dirty.extract::<PyBool>(py)?.is_true();
        self.inner(py).borrow_mut().add_file(
            filename,
            entry,
            added,
            merged,
            from_p2,
            possibly_dirty
        ).and(Ok(py.None())).or_else(|e: DirstateError| {
            Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
        })
    }

    def removefile(
        &self,
        f: PyObject,
        in_merge: PyObject
    ) -> PyResult<PyObject> {
        self.inner(py).borrow_mut()
            .remove_file(
                HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
                in_merge.extract::<PyBool>(py)?.is_true(),
            )
            .or_else(|_| {
                Err(PyErr::new::<exc::OSError, _>(
                    py,
                    "Dirstate error".to_string(),
                ))
            })?;
        Ok(py.None())
    }

    def dropfile(
        &self,
        f: PyObject,
    ) -> PyResult<PyBool> {
        self.inner(py).borrow_mut()
            .drop_file(
                HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
            )
            .and_then(|b| Ok(b.to_py_object(py)))
            .or_else(|e| {
                Err(PyErr::new::<exc::OSError, _>(
                    py,
                    format!("Dirstate error: {}", e.to_string()),
                ))
            })
    }

    def clearambiguoustimes(
        &self,
        files: PyObject,
        now: PyObject
    ) -> PyResult<PyObject> {
        let files: PyResult<Vec<HgPathBuf>> = files
            .iter(py)?
            .map(|filename| {
                Ok(HgPathBuf::from_bytes(
                    filename?.extract::<PyBytes>(py)?.data(py),
                ))
            })
            .collect();
        self.inner(py)
            .borrow_mut()
            .clear_ambiguous_times(files?, now.extract(py)?)
            .map_err(|e| v2_error(py, e))?;
        Ok(py.None())
    }

    def other_parent_entries(&self) -> PyResult<PyObject> {
        let mut inner_shared = self.inner(py).borrow_mut();
        let set = PySet::empty(py)?;
        for path in inner_shared.iter_other_parent_paths() {
            let path = path.map_err(|e| v2_error(py, e))?;
            set.add(py, PyBytes::new(py, path.as_bytes()))?;
        }
        Ok(set.into_object())
    }

    def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
        NonNormalEntries::from_inner(py, self.clone_ref(py))
    }

    def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
        let key = key.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow_mut()
            .non_normal_entries_contains(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))
    }

    def non_normal_entries_display(&self) -> PyResult<PyString> {
        let mut inner = self.inner(py).borrow_mut();
        let paths = inner
            .iter_non_normal_paths()
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| v2_error(py, e))?;
        let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
        Ok(PyString::new(py, &formatted))
    }

    def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        let key = key.data(py);
        let was_present = self
            .inner(py)
            .borrow_mut()
            .non_normal_entries_remove(HgPath::new(key));
        if !was_present {
            let msg = String::from_utf8_lossy(key);
            Err(PyErr::new::<exc::KeyError, _>(py, msg))
        } else {
            Ok(py.None())
        }
    }

    def non_normal_entries_discard(&self, key: PyObject) -> PyResult<PyObject>
    {
        let key = key.extract::<PyBytes>(py)?;
        self
            .inner(py)
            .borrow_mut()
            .non_normal_entries_remove(HgPath::new(key.data(py)));
        Ok(py.None())
    }

    def non_normal_entries_add(&self, key: PyObject) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        self
            .inner(py)
            .borrow_mut()
            .non_normal_entries_add(HgPath::new(key.data(py)));
        Ok(py.None())
    }

    def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
        let mut inner = self.inner(py).borrow_mut();

        let ret = PyList::new(py, &[]);
        for filename in inner.non_normal_or_other_parent_paths() {
            let filename = filename.map_err(|e| v2_error(py, e))?;
            let as_pystring = PyBytes::new(py, filename.as_bytes());
            ret.append(py, as_pystring.into_object());
        }
        Ok(ret)
    }

    def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
        // Make sure the sets are defined before we no longer have a mutable
        // reference to the dmap.
        self.inner(py)
            .borrow_mut()
            .set_non_normal_other_parent_entries(false);

        let leaked_ref = self.inner(py).leak_immutable();

        NonNormalEntriesIterator::from_inner(py, unsafe {
            leaked_ref.map(py, |o| {
                o.iter_non_normal_paths_panic()
            })
        })
    }

    def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
        let d = d.extract::<PyBytes>(py)?;
        Ok(self.inner(py).borrow_mut()
            .has_tracked_dir(HgPath::new(d.data(py)))
            .map_err(|e| {
                PyErr::new::<exc::ValueError, _>(py, e.to_string())
            })?
            .to_py_object(py))
    }

    def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
        let d = d.extract::<PyBytes>(py)?;
        Ok(self.inner(py).borrow_mut()
            .has_dir(HgPath::new(d.data(py)))
            .map_err(|e| {
                PyErr::new::<exc::ValueError, _>(py, e.to_string())
            })?
            .to_py_object(py))
    }

    def write_v1(
        &self,
        p1: PyObject,
        p2: PyObject,
        now: PyObject
    ) -> PyResult<PyBytes> {
        let now = Timestamp(now.extract(py)?);

        let mut inner = self.inner(py).borrow_mut();
        let parents = DirstateParents {
            p1: extract_node_id(py, &p1)?,
            p2: extract_node_id(py, &p2)?,
        };
        let result = inner.pack_v1(parents, now);
        match result {
            Ok(packed) => Ok(PyBytes::new(py, &packed)),
            Err(_) => Err(PyErr::new::<exc::OSError, _>(
                py,
                "Dirstate error".to_string(),
            )),
        }
    }

    /// Returns new data together with whether that data should be appended to
    /// the existing data file whose content is at `self.on_disk` (True),
    /// instead of written to a new data file (False).
    def write_v2(
        &self,
        now: PyObject,
        can_append: bool,
    ) -> PyResult<PyObject> {
        let now = Timestamp(now.extract(py)?);

        let mut inner = self.inner(py).borrow_mut();
        let result = inner.pack_v2(now, can_append);
        match result {
            Ok((packed, tree_metadata, append)) => {
                let packed = PyBytes::new(py, &packed);
                let tree_metadata = PyBytes::new(py, &tree_metadata);
                let tuple = (packed, tree_metadata, append);
                Ok(tuple.to_py_object(py).into_object())
            },
            Err(_) => Err(PyErr::new::<exc::OSError, _>(
                py,
                "Dirstate error".to_string(),
            )),
        }
    }

    def filefoldmapasdict(&self) -> PyResult<PyDict> {
        let dict = PyDict::new(py);
        for item in self.inner(py).borrow_mut().iter() {
            let (path, entry) = item.map_err(|e| v2_error(py, e))?;
            if entry.state != EntryState::Removed {
                let key = normalize_case(path);
                let value = path;
                dict.set_item(
                    py,
                    PyBytes::new(py, key.as_bytes()).into_object(),
                    PyBytes::new(py, value.as_bytes()).into_object(),
                )?;
            }
        }
        Ok(dict)
    }

    def __len__(&self) -> PyResult<usize> {
        Ok(self.inner(py).borrow().len())
    }

    def __contains__(&self, key: PyObject) -> PyResult<bool> {
        let key = key.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow()
            .contains_key(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))
    }

    def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        let key = HgPath::new(key.data(py));
        match self
            .inner(py)
            .borrow()
            .get(key)
            .map_err(|e| v2_error(py, e))?
        {
            Some(entry) => {
                Ok(make_dirstate_item(py, &entry)?)
            },
            None => Err(PyErr::new::<exc::KeyError, _>(
                py,
                String::from_utf8_lossy(key.as_bytes()),
            )),
        }
    }

    def keys(&self) -> PyResult<DirstateMapKeysIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        DirstateMapKeysIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.iter()) },
        )
    }

    def items(&self) -> PyResult<DirstateMapItemsIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        DirstateMapItemsIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.iter()) },
        )
    }

    def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        DirstateMapKeysIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.iter()) },
        )
    }

    // TODO all copymap* methods, see docstring above
    def copymapcopy(&self) -> PyResult<PyDict> {
        let dict = PyDict::new(py);
        for item in self.inner(py).borrow().copy_map_iter() {
            let (key, value) = item.map_err(|e| v2_error(py, e))?;
            dict.set_item(
                py,
                PyBytes::new(py, key.as_bytes()),
                PyBytes::new(py, value.as_bytes()),
            )?;
        }
        Ok(dict)
    }

    def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow()
            .copy_map_get(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
            None => Err(PyErr::new::<exc::KeyError, _>(
                py,
                String::from_utf8_lossy(key.data(py)),
            )),
        }
    }
    def copymap(&self) -> PyResult<CopyMap> {
        CopyMap::from_inner(py, self.clone_ref(py))
    }

    def copymaplen(&self) -> PyResult<usize> {
        Ok(self.inner(py).borrow().copy_map_len())
    }
    def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
        let key = key.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow()
            .copy_map_contains_key(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))
    }
    def copymapget(
        &self,
        key: PyObject,
        default: Option<PyObject>
    ) -> PyResult<Option<PyObject>> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow()
            .copy_map_get(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(copy) => Ok(Some(
                PyBytes::new(py, copy.as_bytes()).into_object(),
            )),
            None => Ok(default),
        }
    }
    def copymapsetitem(
        &self,
        key: PyObject,
        value: PyObject
    ) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        let value = value.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow_mut()
            .copy_map_insert(
                HgPathBuf::from_bytes(key.data(py)),
                HgPathBuf::from_bytes(value.data(py)),
            )
            .map_err(|e| v2_error(py, e))?;
        Ok(py.None())
    }
    def copymappop(
        &self,
        key: PyObject,
        default: Option<PyObject>
    ) -> PyResult<Option<PyObject>> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow_mut()
            .copy_map_remove(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(_) => Ok(None),
            None => Ok(default),
        }
    }

    def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        CopyMapKeysIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
        )
    }

    def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        CopyMapItemsIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
        )
    }

    def tracked_dirs(&self) -> PyResult<PyList> {
        let dirs = PyList::new(py, &[]);
        for path in self.inner(py).borrow_mut().iter_tracked_dirs()
            .map_err(|e| dirstate_error(py, e))?
        {
            let path = path.map_err(|e| v2_error(py, e))?;
            let path = PyBytes::new(py, path.as_bytes());
            dirs.append(py, path.into_object())
        }
        Ok(dirs)
    }

    def debug_iter(&self) -> PyResult<PyList> {
        let dirs = PyList::new(py, &[]);
        for item in self.inner(py).borrow().debug_iter() {
            let (path, (state, mode, size, mtime)) =
                item.map_err(|e| v2_error(py, e))?;
            let path = PyBytes::new(py, path.as_bytes());
            let item = make_dirstate_item_raw(py, state, mode, size, mtime)?;
            dirs.append(py, (path, item).to_py_object(py).into_object())
        }
        Ok(dirs)
    }
});

impl DirstateMap {
    pub fn get_inner_mut<'a>(
        &'a self,
        py: Python<'a>,
    ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
        self.inner(py).borrow_mut()
    }
    fn translate_key(
        py: Python,
        res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
    ) -> PyResult<Option<PyBytes>> {
        let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
        Ok(Some(PyBytes::new(py, f.as_bytes())))
    }
    fn translate_key_value(
        py: Python,
        res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
    ) -> PyResult<Option<(PyBytes, PyObject)>> {
        let (f, entry) = res.map_err(|e| v2_error(py, e))?;
        Ok(Some((
            PyBytes::new(py, f.as_bytes()),
            make_dirstate_item(py, &entry)?,
        )))
    }
}

py_shared_iterator!(
    DirstateMapKeysIterator,
    UnsafePyLeaked<StateMapIter<'static>>,
    DirstateMap::translate_key,
    Option<PyBytes>
);

py_shared_iterator!(
    DirstateMapItemsIterator,
    UnsafePyLeaked<StateMapIter<'static>>,
    DirstateMap::translate_key_value,
    Option<(PyBytes, PyObject)>
);

fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
    let bytes = obj.extract::<PyBytes>(py)?;
    match bytes.data(py).try_into() {
        Ok(s) => Ok(s),
        Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
    }
}

pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
    PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
}

fn dirstate_error(py: Python<'_>, e: DirstateError) -> PyErr {
    PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
}
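The Python-facing class above stores its backend as `Box<dyn DirstateMapMethods + Send>`, so the same bindings can sit on top of either the tree-based `OwningDirstateMap` or the flat `RustDirstateMap`, chosen in `new_v1`. A minimal standalone sketch of that dispatch pattern follows; the `Backend`, `FlatMap`, `TreeMap` and `PyDirstateMap` names are illustrative stand-ins, not part of the Mercurial code:

use std::collections::{BTreeMap, HashMap};

// Hypothetical stand-in for `DirstateMapMethods`: the wrapper only needs
// this object-safe surface, never a concrete map type.
trait Backend: Send {
    fn insert(&mut self, path: &str, size: u32);
    fn get(&self, path: &str) -> Option<u32>;
    fn len(&self) -> usize;
}

#[derive(Default)]
struct FlatMap(HashMap<String, u32>); // plays the role of the flat map

impl Backend for FlatMap {
    fn insert(&mut self, path: &str, size: u32) {
        self.0.insert(path.to_owned(), size);
    }
    fn get(&self, path: &str) -> Option<u32> {
        self.0.get(path).copied()
    }
    fn len(&self) -> usize {
        self.0.len()
    }
}

#[derive(Default)]
struct TreeMap(BTreeMap<String, u32>); // plays the role of the tree-based map

impl Backend for TreeMap {
    fn insert(&mut self, path: &str, size: u32) {
        self.0.insert(path.to_owned(), size);
    }
    fn get(&self, path: &str) -> Option<u32> {
        self.0.get(path).copied()
    }
    fn len(&self) -> usize {
        self.0.len()
    }
}

// The wrapper never knows which backend it holds, mirroring `inner` above.
struct PyDirstateMap {
    inner: Box<dyn Backend + Send>,
}

impl PyDirstateMap {
    fn new(use_tree: bool) -> Self {
        let inner: Box<dyn Backend + Send> = if use_tree {
            Box::new(TreeMap::default())
        } else {
            Box::new(FlatMap::default())
        };
        Self { inner }
    }
}

fn main() {
    let mut map = PyDirstateMap::new(true);
    map.inner.insert("dir/file.rs", 42);
    assert_eq!(map.inner.get("dir/file.rs"), Some(42));
    assert_eq!(map.inner.len(), 1);
}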
@@ -1,232 +1,240
use crate::dirstate::owning::OwningDirstateMap;
use hg::dirstate::parsers::Timestamp;
use hg::dirstate_tree::dispatch::DirstateMapMethods;
use hg::dirstate_tree::on_disk::DirstateV2ParseError;
use hg::matchers::Matcher;
use hg::utils::hg_path::{HgPath, HgPathBuf};
use hg::CopyMapIter;
use hg::DirstateEntry;
use hg::DirstateError;
use hg::DirstateParents;
use hg::DirstateStatus;
use hg::PatternFileWarning;
use hg::StateMapIter;
use hg::StatusError;
use hg::StatusOptions;
use std::path::PathBuf;

impl DirstateMapMethods for OwningDirstateMap {
    fn clear(&mut self) {
        self.get_mut().clear()
    }

    fn set_v1(&mut self, filename: &HgPath, entry: DirstateEntry) {
        self.get_mut().set_v1(filename, entry)
    }

    fn add_file(
        &mut self,
        filename: &HgPath,
        entry: DirstateEntry,
        added: bool,
        merged: bool,
        from_p2: bool,
        possibly_dirty: bool,
    ) -> Result<(), DirstateError> {
        self.get_mut().add_file(
            filename,
            entry,
            added,
            merged,
            from_p2,
            possibly_dirty,
        )
    }

    fn remove_file(
        &mut self,
        filename: &HgPath,
        in_merge: bool,
    ) -> Result<(), DirstateError> {
        self.get_mut().remove_file(filename, in_merge)
    }

    fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
        self.get_mut().drop_file(filename)
    }

    fn clear_ambiguous_times(
        &mut self,
        filenames: Vec<HgPathBuf>,
        now: i32,
    ) -> Result<(), DirstateV2ParseError> {
        self.get_mut().clear_ambiguous_times(filenames, now)
    }

    fn non_normal_entries_contains(
        &mut self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError> {
        self.get_mut().non_normal_entries_contains(key)
    }

    fn non_normal_entries_remove(&mut self, key: &HgPath) -> bool {
        self.get_mut().non_normal_entries_remove(key)
    }

    fn non_normal_entries_add(&mut self, key: &HgPath) {
        self.get_mut().non_normal_entries_add(key)
    }

    fn non_normal_or_other_parent_paths(
        &mut self,
    ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
    {
        self.get_mut().non_normal_or_other_parent_paths()
    }

    fn set_non_normal_other_parent_entries(&mut self, force: bool) {
        self.get_mut().set_non_normal_other_parent_entries(force)
    }

    fn iter_non_normal_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        self.get_mut().iter_non_normal_paths()
    }

    fn iter_non_normal_paths_panic(
        &self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        self.get().iter_non_normal_paths_panic()
    }

    fn iter_other_parent_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        self.get_mut().iter_other_parent_paths()
    }

    fn has_tracked_dir(
        &mut self,
        directory: &HgPath,
    ) -> Result<bool, DirstateError> {
        self.get_mut().has_tracked_dir(directory)
    }

    fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
        self.get_mut().has_dir(directory)
    }

    fn pack_v1(
        &mut self,
        parents: DirstateParents,
        now: Timestamp,
    ) -> Result<Vec<u8>, DirstateError> {
        self.get_mut().pack_v1(parents, now)
    }

    fn pack_v2(
        &mut self,
        now: Timestamp,
        can_append: bool,
    ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
        self.get_mut().pack_v2(now, can_append)
    }

    fn status<'a>(
        &'a mut self,
        matcher: &'a (dyn Matcher + Sync),
        root_dir: PathBuf,
        ignore_files: Vec<PathBuf>,
        options: StatusOptions,
    ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
    {
        self.get_mut()
            .status(matcher, root_dir, ignore_files, options)
    }

    fn copy_map_len(&self) -> usize {
        self.get().copy_map_len()
    }

    fn copy_map_iter(&self) -> CopyMapIter<'_> {
        self.get().copy_map_iter()
    }

    fn copy_map_contains_key(
        &self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError> {
        self.get().copy_map_contains_key(key)
    }

    fn copy_map_get(
        &self,
        key: &HgPath,
    ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
        self.get().copy_map_get(key)
    }

    fn copy_map_remove(
        &mut self,
        key: &HgPath,
    ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
        self.get_mut().copy_map_remove(key)
    }

    fn copy_map_insert(
        &mut self,
        key: HgPathBuf,
        value: HgPathBuf,
    ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
        self.get_mut().copy_map_insert(key, value)
    }

    fn len(&self) -> usize {
        self.get().len()
    }

    fn contains_key(
        &self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError> {
        self.get().contains_key(key)
    }

    fn get(
        &self,
        key: &HgPath,
    ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
        self.get().get(key)
    }

    fn iter(&self) -> StateMapIter<'_> {
        self.get().iter()
    }

    fn iter_tracked_dirs(
        &mut self,
    ) -> Result<
        Box<
            dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
                + Send
                + '_,
        >,
        DirstateError,
    > {
        self.get_mut().iter_tracked_dirs()
    }

    fn debug_iter(
        &self,
    ) -> Box<
        dyn Iterator<
                Item = Result<
                    (&HgPath, (u8, i32, i32, i32)),
                    DirstateV2ParseError,
                >,
            > + Send
            + '_,
    > {
        self.get().debug_iter()
    }
}
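The impl above is pure forwarding: every `DirstateMapMethods` call is handed to the map returned by `get()`/`get_mut()`, so the owning wrapper and the map it carries stay interchangeable behind the same trait. A reduced sketch of that delegation pattern, using hypothetical `Counting`/`Counter`/`OwningCounter` names rather than the Mercurial types:

// Hypothetical trait standing in for DirstateMapMethods.
trait Counting {
    fn add(&mut self, n: u64);
    fn total(&self) -> u64;
}

#[derive(Default)]
struct Counter {
    total: u64,
}

impl Counting for Counter {
    fn add(&mut self, n: u64) {
        self.total += n;
    }
    fn total(&self) -> u64 {
        self.total
    }
}

// A wrapper that owns its inner value (the real OwningDirstateMap also owns
// the on-disk bytes the map borrows from) and implements the trait by
// delegating through get()/get_mut(), like the impl above.
struct OwningCounter {
    inner: Counter,
}

impl OwningCounter {
    fn get(&self) -> &Counter {
        &self.inner
    }
    fn get_mut(&mut self) -> &mut Counter {
        &mut self.inner
    }
}

impl Counting for OwningCounter {
    fn add(&mut self, n: u64) {
        self.get_mut().add(n)
    }
    fn total(&self) -> u64 {
        self.get().total()
    }
}

fn main() {
    let mut c = OwningCounter { inner: Counter::default() };
    c.add(3);
    c.add(4);
    assert_eq!(c.total(), 7);
}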
@@ -1,77 +1,83
// non_normal_other_parent_entries.rs
//
// Copyright 2020 Raphaël Gomès <rgomes@octobus.net>
//
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.

use cpython::{
    exc::NotImplementedError, CompareOp, ObjectProtocol, PyBytes, PyClone,
    PyErr, PyObject, PyResult, PyString, Python, PythonObject, ToPyObject,
    UnsafePyLeaked,
};

use crate::dirstate::dirstate_map::v2_error;
use crate::dirstate::DirstateMap;
use hg::dirstate_tree::on_disk::DirstateV2ParseError;
use hg::utils::hg_path::HgPath;
use std::cell::RefCell;

py_class!(pub class NonNormalEntries |py| {
    data dmap: DirstateMap;

    def __contains__(&self, key: PyObject) -> PyResult<bool> {
        self.dmap(py).non_normal_entries_contains(py, key)
    }
    def remove(&self, key: PyObject) -> PyResult<PyObject> {
        self.dmap(py).non_normal_entries_remove(py, key)
    }
    def add(&self, key: PyObject) -> PyResult<PyObject> {
        self.dmap(py).non_normal_entries_add(py, key)
    }
    def discard(&self, key: PyObject) -> PyResult<PyObject> {
        self.dmap(py).non_normal_entries_discard(py, key)
    }
    def __richcmp__(&self, other: PyObject, op: CompareOp) -> PyResult<bool> {
        match op {
            CompareOp::Eq => self.is_equal_to(py, other),
            CompareOp::Ne => Ok(!self.is_equal_to(py, other)?),
            _ => Err(PyErr::new::<NotImplementedError, _>(py, ""))
        }
    }
    def __repr__(&self) -> PyResult<PyString> {
        self.dmap(py).non_normal_entries_display(py)
    }

    def __iter__(&self) -> PyResult<NonNormalEntriesIterator> {
        self.dmap(py).non_normal_entries_iter(py)
    }
});

impl NonNormalEntries {
    pub fn from_inner(py: Python, dm: DirstateMap) -> PyResult<Self> {
        Self::create_instance(py, dm)
    }

    fn is_equal_to(&self, py: Python, other: PyObject) -> PyResult<bool> {
        for item in other.iter(py)? {
            if !self.dmap(py).non_normal_entries_contains(py, item?)? {
                return Ok(false);
            }
        }
        Ok(true)
    }

    fn translate_key(
        py: Python,
        key: Result<&HgPath, DirstateV2ParseError>,
    ) -> PyResult<Option<PyBytes>> {
        let key = key.map_err(|e| v2_error(py, e))?;
        Ok(Some(PyBytes::new(py, key.as_bytes())))
    }
}

type NonNormalEntriesIter<'a> = Box<
    dyn Iterator<Item = Result<&'a HgPath, DirstateV2ParseError>> + Send + 'a,
>;

py_shared_iterator!(
    NonNormalEntriesIterator,
    UnsafePyLeaked<NonNormalEntriesIter<'static>>,
    NonNormalEntries::translate_key,
    Option<PyBytes>
);
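`NonNormalEntries` now exposes both `remove` and `discard`; the distinction hinges on `non_normal_entries_remove` reporting whether the key was actually present, so only `remove` surfaces a `KeyError` while `discard` stays silent. A toy, set-backed sketch of that contract (the names here are illustrative, not the Mercurial implementation):

use std::collections::HashSet;

#[derive(Default)]
struct NonNormalSet {
    paths: HashSet<Vec<u8>>,
}

impl NonNormalSet {
    fn add(&mut self, path: &[u8]) {
        self.paths.insert(path.to_vec());
    }

    // Mirrors the `non_normal_entries_remove -> bool` signature: report
    // whether the path was present so the caller can decide how to react.
    fn remove(&mut self, path: &[u8]) -> bool {
        self.paths.remove(path)
    }
}

// `remove` semantics: a missing key is an error for the caller.
fn remove_or_err(set: &mut NonNormalSet, path: &[u8]) -> Result<(), String> {
    if set.remove(path) {
        Ok(())
    } else {
        Err(format!("KeyError: {}", String::from_utf8_lossy(path)))
    }
}

// `discard` semantics: removing a missing key is a no-op.
fn discard(set: &mut NonNormalSet, path: &[u8]) {
    let _ = set.remove(path);
}

fn main() {
    let mut set = NonNormalSet::default();
    set.add(b"a.txt");
    assert!(remove_or_err(&mut set, b"a.txt").is_ok());
    assert!(remove_or_err(&mut set, b"a.txt").is_err());
    discard(&mut set, b"a.txt"); // silently does nothing
}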