##// END OF EJS Templates
dirstate: move the copymap drop inside dropfile...
marmoute -
r48784:7ab99007 default
parent child Browse files
Show More
@@ -1,1619 +1,1618
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
# Parser backend: C or pure-Python implementation, chosen by policy at
# import time.
parsers = policy.importmod('parsers')
# Optional Rust implementation of the dirstate; None when unavailable.
rustmod = policy.importrust('dirstate')

# The dirstate-v2 on-disk format is only implemented by the Rust extensions.
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# Local aliases for frequently used helpers.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # Resolve fname relative to the repository's .hg directory via the
        # dirstate's opener.
        return obj._opener.join(fname)
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # Resolve fname relative to the working-directory root.
        return obj._join(fname)
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator enforcing that ``func`` runs inside a parentchange context.

    Raises error.ProgrammingError when the dirstate does not currently have
    a pending parent change (see dirstate.parentchange()).
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        raise error.ProgrammingError(
            'calling `%s` outside of a parentchange context' % func.__name__
        )

    return wrap
def requires_no_parents_change(func):
    """Decorator enforcing that ``func`` runs OUTSIDE a parentchange context.

    Raises error.ProgrammingError when the dirstate currently has a pending
    parent change (see dirstate.parentchange()).
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        raise error.ProgrammingError(
            'calling `%s` inside of a parentchange context' % func.__name__
        )

    return wrap
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state diverges from what is on disk.
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # Depth of nested parentchange() contexts; see pendingparentchange().
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        # Files touched since the last write; consulted when persisting.
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # Touching self._pl forces the dirstate map (and thus the parents)
        # to be read now rather than lazily later.
        self._pl
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # Assigning to self._map here replaces this propertycache descriptor's
        # result, so subsequent accesses hit the instance attribute directly.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
    @property
    def _pl(self):
        # Parent nodes (p1, p2) as recorded in the dirstate map.
        return self._map.parents()
    def hasdir(self, d):
        """Return True if ``d`` is a directory containing tracked files."""
        return self._map.hastrackeddir(d)
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' instead of the
        # OS-native separator (only meaningful where ossep != '/').
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
    @propertycache
    def _checklink(self):
        # True when the filesystem under the repo root supports symlinks.
        return util.checklink(self._root)
    @propertycache
    def _checkexec(self):
        # True when the filesystem under the repo root preserves exec bits.
        return bool(util.checkexec(self._root))
    @propertycache
    def _checkcase(self):
        # True when the filesystem is case-insensitive (probed via .hg).
        return not util.fscasesensitive(self._join(b'.hg'))
    def _join(self, f):
        """Return the absolute path of repo-relative path ``f``."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
    def flagfunc(self, buildfallback):
        """Return a callable mapping a tracked path to its flags.

        The returned function yields b'l' for symlinks, b'x' for executable
        files and b'' otherwise.  When the filesystem cannot express symlinks
        and/or exec bits, the fallback built by ``buildfallback()`` supplies
        the missing information.
        """
        if self._checklink and self._checkexec:
            # Filesystem supports both; answer from lstat alone.

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    # Missing/unreadable file: report no flags.
                    pass
                return b''

            return f

        # At least one capability is missing: build the fallback lazily here,
        # only when it is actually needed.
        fallback = buildfallback()
        if self._checklink:

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # Neither capability: everything comes from the fallback.
            return fallback
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
    def __contains__(self, key):
        """Return True if filename ``key`` has a dirstate entry."""
        return key in self._map
    def __iter__(self):
        """Iterate over tracked filenames in sorted order."""
        return iter(sorted(self._map))
    def items(self):
        """Iterate over (filename, dirstate entry) pairs."""
        return pycompat.iteritems(self._map)
    # Python 2 compatibility alias for items().
    iteritems = items
    def directories(self):
        # Delegates to the dirstate map; see dirstatemap documentation.
        return self._map.directories()
    def parents(self):
        """Return the (validated) list of both parent nodes."""
        return [self._validate(p) for p in self._pl]
    def p1(self):
        """Return the (validated) first parent node."""
        return self._validate(self._pl[0])
    def p2(self):
        """Return the (validated) second parent node."""
        return self._validate(self._pl[1])
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # A non-null second parent is what defines a merge state.
        return self._pl[1] != self._nodeconstants.nullid
    def branch(self):
        """Return the current branch name in local encoding."""
        return encoding.tolocal(self._branch)
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # Remember the pre-change parents so callers can restore them.
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # Moving from a merge (two parents) back to a single parent:
            # entries carrying merge/other-parent state must be rewritten.
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
    def setbranch(self, branch):
        """Set the current branch, persisting it to .hg/branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # Discard the atomictemp file so a half-written branch file is
            # never left behind.
            f.discard()
            raise
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # Drop the cached properties so they are recomputed on next access.
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
    def copied(self, file):
        """Return the copy source of ``file``, or None if not a copy."""
        return self._map.copymap.get(file, None)
    def copies(self):
        """Return the full copy map (dest -> source)."""
        return self._map.copymap
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True if the file was previously untracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            # Unknown file: newly added.
            self._add(filename)
            return True
        elif not entry.tracked:
            # Known but untracked (e.g. removed): re-track it.
            self._normallookup(filename)
            return True
        # XXX This is probably overkill for more case, but we need this to
        # fully replace the `normallookup` call with `set_tracked` one.
        # Consider smoothing this in the future.
        self.set_possibly_dirty(filename)
        return False
    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True if the file was previously tracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            # Never tracked: nothing to do.
            return False
        elif entry.added:
            # Added but not yet committed: drop it entirely.
            self._drop(filename)
            return True
        else:
            self._dirty = True
            self._updatedfiles.add(filename)
            self._map.set_untracked(filename)
            return True
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        self._updatedfiles.add(filename)
        if parentfiledata:
            # Caller already stat'ed the file: reuse (mode, size, mtime).
            (mode, size, mtime) = parentfiledata
        else:
            (mode, size, mtime) = self._get_filedata(filename)
        self._addpath(filename, mode=mode, size=size, mtime=mtime)
        # A clean file cannot be a pending copy or non-normal.
        self._map.copymap.pop(filename, None)
        if filename in self._map.nonnormalset:
            self._map.nonnormalset.remove(filename)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._updatedfiles.add(filename)
        self._map.set_possibly_dirty(filename)
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            pass
        else:
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the direstates parent changes to keep track
        of what is the file situation in regards to the working copy and its
        parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.

        :filename: the tracked path being updated
        :wc_tracked: is the file tracked by the working copy?
        :p1_tracked: is the file tracked in the first parent?
        :parentfiledata: optional (mode, size, mtime) tuple; when None and
            needed, it is fetched from disk via ``_get_filedata``.
        """
        # `merged` describes a two-parent state, so a single-parent "clean"
        # flag cannot be combined with it.
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        self._updatedfiles.add(filename)

        # stat data is only meaningful when the file is cleanly tracked in
        # both the working copy and p1 and not flagged dirty/merged.
        need_parent_file_data = (
            not (possibly_dirty or clean_p2 or merged)
            and wc_tracked
            and p1_tracked
        )

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        if need_parent_file_data:
            if parentfiledata is None:
                parentfiledata = self._get_filedata(filename)
            mtime = parentfiledata[2]

            if mtime > self._lastnormaltime:
                # Remember the most recent modification timeslot for
                # status(), to make sure we won't miss future
                # size-preserving file content modifications that happen
                # within the same timeslot.
                self._lastnormaltime = mtime

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_tracked=p2_tracked,
            merged=merged,
            clean_p1=clean_p1,
            clean_p2=clean_p2,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
671
671
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """internal helper: record `f` in the dirstate map

        Validates the filename and checks for file/directory clashes only
        when the entry is newly added or resurrects a removed entry; then
        marks the dirstate dirty and forwards all flags to the map's
        ``addfile``.

        Raises error.Abort on a name clash with a tracked directory or file.
        """
        entry = self._map.get(f)
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            # a tracked directory with the same name shadows the file
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
711
711
    def _get_filedata(self, filename):
        """return the on-disk (mode, size, mtime) for `filename`

        The path is resolved relative to the repository root via
        ``self._join`` and stat'ed without following symlinks (lstat).
        Raises OSError if the file does not exist.
        """
        s = os.lstat(self._join(filename))
        mode = s.st_mode
        size = s.st_size
        mtime = s[stat.ST_MTIME]
        return (mode, size, mtime)
719
719
    def _normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    # preserve any copy source before re-adding the file,
                    # since _addpath/copymap manipulation would lose it
                    source = self._map.copymap.get(f)
                    self._addpath(f, from_p2=True)
                    self._map.copymap.pop(f, None)
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in the desired merge-related state
                    return
        self._addpath(f, possibly_dirty=True)
        # a "normal" file carries no copy information
        self._map.copymap.pop(f, None)
741
741
    def _add(self, filename):
        """internal function to mark a file as added"""
        self._addpath(filename, added=True)
        # an added file has no copy source recorded
        self._map.copymap.pop(filename, None)
746
746
    def _drop(self, filename):
        """internal function to drop a file from the dirstate"""
        # NOTE(review): the associated copymap entry is expected to be
        # cleared by ``dropfile`` itself — confirm in dirstatemap.
        if self._map.dropfile(filename):
            # only mark dirty when an entry was actually removed
            self._dirty = True
            self._updatedfiles.add(filename)
752
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """discover the filesystem's case-folded spelling of `path`

        `normed` is the case-normalized form of `path`; `exists` may be a
        known existence result (None means "stat the filesystem").  When a
        folding is discovered for an existing path it is cached in
        `storemap` (keyed by `normed`).  Returns the folded path.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                # fold the directory part recursively, keep the missing
                # final component as given
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # cache only for paths that exist on disk
            storemap[normed] = folded

        return folded
779
778
    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        """case-normalize `path` against tracked *files* only

        Consults the filefoldmap cache first; falls back to filesystem
        discovery unless `isknown` says the spelling came from a disk walk.
        """
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                # name came from walking the disk: trust its case
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded
791
790
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """case-normalize `path` against tracked files *and* directories

        Checks the file foldmap, then the directory foldmap; unresolved
        names fall back to filesystem discovery (cached in dirfoldmap).
        """
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
807
806
808 def normalize(self, path, isknown=False, ignoremissing=False):
807 def normalize(self, path, isknown=False, ignoremissing=False):
809 """
808 """
810 normalize the case of a pathname when on a casefolding filesystem
809 normalize the case of a pathname when on a casefolding filesystem
811
810
812 isknown specifies whether the filename came from walking the
811 isknown specifies whether the filename came from walking the
813 disk, to avoid extra filesystem access.
812 disk, to avoid extra filesystem access.
814
813
815 If ignoremissing is True, missing path are returned
814 If ignoremissing is True, missing path are returned
816 unchanged. Otherwise, we try harder to normalize possibly
815 unchanged. Otherwise, we try harder to normalize possibly
817 existing path components.
816 existing path components.
818
817
819 The normalized case is determined based on the following precedence:
818 The normalized case is determined based on the following precedence:
820
819
821 - version of name already stored in the dirstate
820 - version of name already stored in the dirstate
822 - version of name stored on disk
821 - version of name stored on disk
823 - version provided via command arguments
822 - version provided via command arguments
824 """
823 """
825
824
826 if self._checkcase:
825 if self._checkcase:
827 return self._normalize(path, isknown, ignoremissing)
826 return self._normalize(path, isknown, ignoremissing)
828 return path
827 return path
829
828
830 def clear(self):
829 def clear(self):
831 self._map.clear()
830 self._map.clear()
832 self._lastnormaltime = 0
831 self._lastnormaltime = 0
833 self._updatedfiles.clear()
832 self._updatedfiles.clear()
834 self._dirty = True
833 self._dirty = True
835
834
    def rebuild(self, parent, allfiles, changedfiles=None):
        """rebuild dirstate entries against revision `parent`

        With `changedfiles` None the whole dirstate is rebuilt from
        `allfiles`; otherwise only the changed files are re-looked-up
        (if still present in `allfiles`) or dropped.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the rebuild
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            # many changed files: set operations win over linear scans
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            # remember the pre-rebuild parents for parent-change callbacks
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self._normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True
869
868
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
877
876
    def write(self, tr):
        """write dirstate changes out, either now or via transaction `tr`

        No-op when the dirstate is not dirty.  With a transaction the
        actual write is delegated to a file generator so it happens at
        transaction close; without one the file is written immediately.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )
            return

        # no transaction: write atomically right now, guarding against
        # ambiguous mtimes (checkambig)
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
909
908
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
920
919
    def _writedirstate(self, tr, st):
        """serialize the dirstate map into open file object `st`

        Fires parent-change callbacks first, then possibly stalls
        (debug.dirstate.delaywrite) so that no entry shares its mtime with
        'now', and finally hands off to the map's ``write``.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            # sorted for deterministic callback ordering across categories
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = 0
        self._dirty = False
954
953
955 def _dirignore(self, f):
954 def _dirignore(self, f):
956 if self._ignore(f):
955 if self._ignore(f):
957 return True
956 return True
958 for p in pathutil.finddirs(f):
957 for p in pathutil.finddirs(f):
959 if self._ignore(p):
958 if self._ignore(p):
960 return True
959 return True
961 return False
960 return False
962
961
963 def _ignorefiles(self):
962 def _ignorefiles(self):
964 files = []
963 files = []
965 if os.path.exists(self._join(b'.hgignore')):
964 if os.path.exists(self._join(b'.hgignore')):
966 files.append(self._join(b'.hgignore'))
965 files.append(self._join(b'.hgignore'))
967 for name, path in self._ui.configitems(b"ui"):
966 for name, path in self._ui.configitems(b"ui"):
968 if name == b'ignore' or name.startswith(b'ignore.'):
967 if name == b'ignore' or name.startswith(b'ignore.'):
969 # we need to use os.path.join here rather than self._join
968 # we need to use os.path.join here rather than self._join
970 # because path is arbitrary and user-specified
969 # because path is arbitrary and user-specified
971 files.append(os.path.join(self._rootdir, util.expandpath(path)))
970 files.append(os.path.join(self._rootdir, util.expandpath(path)))
972 return files
971 return files
973
972
    def _ignorefileandline(self, f):
        """find which ignore file/line matches `f`

        Walks every ignore file (following ``subinclude`` references,
        guarding against revisits) and returns the first matching
        (file, lineno, line) triple, or (None, -1, b"") when nothing
        matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced file unless already processed
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
995
994
996 def _walkexplicit(self, match, subrepos):
995 def _walkexplicit(self, match, subrepos):
997 """Get stat data about the files explicitly specified by match.
996 """Get stat data about the files explicitly specified by match.
998
997
999 Return a triple (results, dirsfound, dirsnotfound).
998 Return a triple (results, dirsfound, dirsnotfound).
1000 - results is a mapping from filename to stat result. It also contains
999 - results is a mapping from filename to stat result. It also contains
1001 listings mapping subrepos and .hg to None.
1000 listings mapping subrepos and .hg to None.
1002 - dirsfound is a list of files found to be directories.
1001 - dirsfound is a list of files found to be directories.
1003 - dirsnotfound is a list of files that the dirstate thinks are
1002 - dirsnotfound is a list of files that the dirstate thinks are
1004 directories and that were not found."""
1003 directories and that were not found."""
1005
1004
1006 def badtype(mode):
1005 def badtype(mode):
1007 kind = _(b'unknown')
1006 kind = _(b'unknown')
1008 if stat.S_ISCHR(mode):
1007 if stat.S_ISCHR(mode):
1009 kind = _(b'character device')
1008 kind = _(b'character device')
1010 elif stat.S_ISBLK(mode):
1009 elif stat.S_ISBLK(mode):
1011 kind = _(b'block device')
1010 kind = _(b'block device')
1012 elif stat.S_ISFIFO(mode):
1011 elif stat.S_ISFIFO(mode):
1013 kind = _(b'fifo')
1012 kind = _(b'fifo')
1014 elif stat.S_ISSOCK(mode):
1013 elif stat.S_ISSOCK(mode):
1015 kind = _(b'socket')
1014 kind = _(b'socket')
1016 elif stat.S_ISDIR(mode):
1015 elif stat.S_ISDIR(mode):
1017 kind = _(b'directory')
1016 kind = _(b'directory')
1018 return _(b'unsupported file type (type is %s)') % kind
1017 return _(b'unsupported file type (type is %s)') % kind
1019
1018
1020 badfn = match.bad
1019 badfn = match.bad
1021 dmap = self._map
1020 dmap = self._map
1022 lstat = os.lstat
1021 lstat = os.lstat
1023 getkind = stat.S_IFMT
1022 getkind = stat.S_IFMT
1024 dirkind = stat.S_IFDIR
1023 dirkind = stat.S_IFDIR
1025 regkind = stat.S_IFREG
1024 regkind = stat.S_IFREG
1026 lnkkind = stat.S_IFLNK
1025 lnkkind = stat.S_IFLNK
1027 join = self._join
1026 join = self._join
1028 dirsfound = []
1027 dirsfound = []
1029 foundadd = dirsfound.append
1028 foundadd = dirsfound.append
1030 dirsnotfound = []
1029 dirsnotfound = []
1031 notfoundadd = dirsnotfound.append
1030 notfoundadd = dirsnotfound.append
1032
1031
1033 if not match.isexact() and self._checkcase:
1032 if not match.isexact() and self._checkcase:
1034 normalize = self._normalize
1033 normalize = self._normalize
1035 else:
1034 else:
1036 normalize = None
1035 normalize = None
1037
1036
1038 files = sorted(match.files())
1037 files = sorted(match.files())
1039 subrepos.sort()
1038 subrepos.sort()
1040 i, j = 0, 0
1039 i, j = 0, 0
1041 while i < len(files) and j < len(subrepos):
1040 while i < len(files) and j < len(subrepos):
1042 subpath = subrepos[j] + b"/"
1041 subpath = subrepos[j] + b"/"
1043 if files[i] < subpath:
1042 if files[i] < subpath:
1044 i += 1
1043 i += 1
1045 continue
1044 continue
1046 while i < len(files) and files[i].startswith(subpath):
1045 while i < len(files) and files[i].startswith(subpath):
1047 del files[i]
1046 del files[i]
1048 j += 1
1047 j += 1
1049
1048
1050 if not files or b'' in files:
1049 if not files or b'' in files:
1051 files = [b'']
1050 files = [b'']
1052 # constructing the foldmap is expensive, so don't do it for the
1051 # constructing the foldmap is expensive, so don't do it for the
1053 # common case where files is ['']
1052 # common case where files is ['']
1054 normalize = None
1053 normalize = None
1055 results = dict.fromkeys(subrepos)
1054 results = dict.fromkeys(subrepos)
1056 results[b'.hg'] = None
1055 results[b'.hg'] = None
1057
1056
1058 for ff in files:
1057 for ff in files:
1059 if normalize:
1058 if normalize:
1060 nf = normalize(ff, False, True)
1059 nf = normalize(ff, False, True)
1061 else:
1060 else:
1062 nf = ff
1061 nf = ff
1063 if nf in results:
1062 if nf in results:
1064 continue
1063 continue
1065
1064
1066 try:
1065 try:
1067 st = lstat(join(nf))
1066 st = lstat(join(nf))
1068 kind = getkind(st.st_mode)
1067 kind = getkind(st.st_mode)
1069 if kind == dirkind:
1068 if kind == dirkind:
1070 if nf in dmap:
1069 if nf in dmap:
1071 # file replaced by dir on disk but still in dirstate
1070 # file replaced by dir on disk but still in dirstate
1072 results[nf] = None
1071 results[nf] = None
1073 foundadd((nf, ff))
1072 foundadd((nf, ff))
1074 elif kind == regkind or kind == lnkkind:
1073 elif kind == regkind or kind == lnkkind:
1075 results[nf] = st
1074 results[nf] = st
1076 else:
1075 else:
1077 badfn(ff, badtype(kind))
1076 badfn(ff, badtype(kind))
1078 if nf in dmap:
1077 if nf in dmap:
1079 results[nf] = None
1078 results[nf] = None
1080 except OSError as inst: # nf not found on disk - it is dirstate only
1079 except OSError as inst: # nf not found on disk - it is dirstate only
1081 if nf in dmap: # does it exactly match a missing file?
1080 if nf in dmap: # does it exactly match a missing file?
1082 results[nf] = None
1081 results[nf] = None
1083 else: # does it match a missing directory?
1082 else: # does it match a missing directory?
1084 if self._map.hasdir(nf):
1083 if self._map.hasdir(nf):
1085 notfoundadd(nf)
1084 notfoundadd(nf)
1086 else:
1085 else:
1087 badfn(ff, encoding.strtolocal(inst.strerror))
1086 badfn(ff, encoding.strtolocal(inst.strerror))
1088
1087
1089 # match.files() may contain explicitly-specified paths that shouldn't
1088 # match.files() may contain explicitly-specified paths that shouldn't
1090 # be taken; drop them from the list of files found. dirsfound/notfound
1089 # be taken; drop them from the list of files found. dirsfound/notfound
1091 # aren't filtered here because they will be tested later.
1090 # aren't filtered here because they will be tested later.
1092 if match.anypats():
1091 if match.anypats():
1093 for f in list(results):
1092 for f in list(results):
1094 if f == b'.hg' or f in subrepos:
1093 if f == b'.hg' or f in subrepos:
1095 # keep sentinel to disable further out-of-repo walks
1094 # keep sentinel to disable further out-of-repo walks
1096 continue
1095 continue
1097 if not match(f):
1096 if not match(f):
1098 del results[f]
1097 del results[f]
1099
1098
1100 # Case insensitive filesystems cannot rely on lstat() failing to detect
1099 # Case insensitive filesystems cannot rely on lstat() failing to detect
1101 # a case-only rename. Prune the stat object for any file that does not
1100 # a case-only rename. Prune the stat object for any file that does not
1102 # match the case in the filesystem, if there are multiple files that
1101 # match the case in the filesystem, if there are multiple files that
1103 # normalize to the same path.
1102 # normalize to the same path.
1104 if match.isexact() and self._checkcase:
1103 if match.isexact() and self._checkcase:
1105 normed = {}
1104 normed = {}
1106
1105
1107 for f, st in pycompat.iteritems(results):
1106 for f, st in pycompat.iteritems(results):
1108 if st is None:
1107 if st is None:
1109 continue
1108 continue
1110
1109
1111 nc = util.normcase(f)
1110 nc = util.normcase(f)
1112 paths = normed.get(nc)
1111 paths = normed.get(nc)
1113
1112
1114 if paths is None:
1113 if paths is None:
1115 paths = set()
1114 paths = set()
1116 normed[nc] = paths
1115 normed[nc] = paths
1117
1116
1118 paths.add(f)
1117 paths.add(f)
1119
1118
1120 for norm, paths in pycompat.iteritems(normed):
1119 for norm, paths in pycompat.iteritems(normed):
1121 if len(paths) > 1:
1120 if len(paths) > 1:
1122 for path in paths:
1121 for path in paths:
1123 folded = self._discoverpath(
1122 folded = self._discoverpath(
1124 path, norm, True, None, self._map.dirfoldmap
1123 path, norm, True, None, self._map.dirfoldmap
1125 )
1124 )
1126 if path != folded:
1125 if path != folded:
1127 results[path] = None
1126 results[path] = None
1128
1127
1129 return results, dirsfound, dirsnotfound
1128 return results, dirsfound, dirsnotfound
1130
1129
1131 def walk(self, match, subrepos, unknown, ignored, full=True):
1130 def walk(self, match, subrepos, unknown, ignored, full=True):
1132 """
1131 """
1133 Walk recursively through the directory tree, finding all files
1132 Walk recursively through the directory tree, finding all files
1134 matched by match.
1133 matched by match.
1135
1134
1136 If full is False, maybe skip some known-clean files.
1135 If full is False, maybe skip some known-clean files.
1137
1136
1138 Return a dict mapping filename to stat-like object (either
1137 Return a dict mapping filename to stat-like object (either
1139 mercurial.osutil.stat instance or return value of os.stat()).
1138 mercurial.osutil.stat instance or return value of os.stat()).
1140
1139
1141 """
1140 """
1142 # full is a flag that extensions that hook into walk can use -- this
1141 # full is a flag that extensions that hook into walk can use -- this
1143 # implementation doesn't use it at all. This satisfies the contract
1142 # implementation doesn't use it at all. This satisfies the contract
1144 # because we only guarantee a "maybe".
1143 # because we only guarantee a "maybe".
1145
1144
1146 if ignored:
1145 if ignored:
1147 ignore = util.never
1146 ignore = util.never
1148 dirignore = util.never
1147 dirignore = util.never
1149 elif unknown:
1148 elif unknown:
1150 ignore = self._ignore
1149 ignore = self._ignore
1151 dirignore = self._dirignore
1150 dirignore = self._dirignore
1152 else:
1151 else:
1153 # if not unknown and not ignored, drop dir recursion and step 2
1152 # if not unknown and not ignored, drop dir recursion and step 2
1154 ignore = util.always
1153 ignore = util.always
1155 dirignore = util.always
1154 dirignore = util.always
1156
1155
1157 matchfn = match.matchfn
1156 matchfn = match.matchfn
1158 matchalways = match.always()
1157 matchalways = match.always()
1159 matchtdir = match.traversedir
1158 matchtdir = match.traversedir
1160 dmap = self._map
1159 dmap = self._map
1161 listdir = util.listdir
1160 listdir = util.listdir
1162 lstat = os.lstat
1161 lstat = os.lstat
1163 dirkind = stat.S_IFDIR
1162 dirkind = stat.S_IFDIR
1164 regkind = stat.S_IFREG
1163 regkind = stat.S_IFREG
1165 lnkkind = stat.S_IFLNK
1164 lnkkind = stat.S_IFLNK
1166 join = self._join
1165 join = self._join
1167
1166
1168 exact = skipstep3 = False
1167 exact = skipstep3 = False
1169 if match.isexact(): # match.exact
1168 if match.isexact(): # match.exact
1170 exact = True
1169 exact = True
1171 dirignore = util.always # skip step 2
1170 dirignore = util.always # skip step 2
1172 elif match.prefix(): # match.match, no patterns
1171 elif match.prefix(): # match.match, no patterns
1173 skipstep3 = True
1172 skipstep3 = True
1174
1173
1175 if not exact and self._checkcase:
1174 if not exact and self._checkcase:
1176 normalize = self._normalize
1175 normalize = self._normalize
1177 normalizefile = self._normalizefile
1176 normalizefile = self._normalizefile
1178 skipstep3 = False
1177 skipstep3 = False
1179 else:
1178 else:
1180 normalize = self._normalize
1179 normalize = self._normalize
1181 normalizefile = None
1180 normalizefile = None
1182
1181
1183 # step 1: find all explicit files
1182 # step 1: find all explicit files
1184 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1183 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1185 if matchtdir:
1184 if matchtdir:
1186 for d in work:
1185 for d in work:
1187 matchtdir(d[0])
1186 matchtdir(d[0])
1188 for d in dirsnotfound:
1187 for d in dirsnotfound:
1189 matchtdir(d)
1188 matchtdir(d)
1190
1189
1191 skipstep3 = skipstep3 and not (work or dirsnotfound)
1190 skipstep3 = skipstep3 and not (work or dirsnotfound)
1192 work = [d for d in work if not dirignore(d[0])]
1191 work = [d for d in work if not dirignore(d[0])]
1193
1192
1194 # step 2: visit subdirectories
1193 # step 2: visit subdirectories
1195 def traverse(work, alreadynormed):
1194 def traverse(work, alreadynormed):
1196 wadd = work.append
1195 wadd = work.append
1197 while work:
1196 while work:
1198 tracing.counter('dirstate.walk work', len(work))
1197 tracing.counter('dirstate.walk work', len(work))
1199 nd = work.pop()
1198 nd = work.pop()
1200 visitentries = match.visitchildrenset(nd)
1199 visitentries = match.visitchildrenset(nd)
1201 if not visitentries:
1200 if not visitentries:
1202 continue
1201 continue
1203 if visitentries == b'this' or visitentries == b'all':
1202 if visitentries == b'this' or visitentries == b'all':
1204 visitentries = None
1203 visitentries = None
1205 skip = None
1204 skip = None
1206 if nd != b'':
1205 if nd != b'':
1207 skip = b'.hg'
1206 skip = b'.hg'
1208 try:
1207 try:
1209 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1208 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1210 entries = listdir(join(nd), stat=True, skip=skip)
1209 entries = listdir(join(nd), stat=True, skip=skip)
1211 except OSError as inst:
1210 except OSError as inst:
1212 if inst.errno in (errno.EACCES, errno.ENOENT):
1211 if inst.errno in (errno.EACCES, errno.ENOENT):
1213 match.bad(
1212 match.bad(
1214 self.pathto(nd), encoding.strtolocal(inst.strerror)
1213 self.pathto(nd), encoding.strtolocal(inst.strerror)
1215 )
1214 )
1216 continue
1215 continue
1217 raise
1216 raise
1218 for f, kind, st in entries:
1217 for f, kind, st in entries:
1219 # Some matchers may return files in the visitentries set,
1218 # Some matchers may return files in the visitentries set,
1220 # instead of 'this', if the matcher explicitly mentions them
1219 # instead of 'this', if the matcher explicitly mentions them
1221 # and is not an exactmatcher. This is acceptable; we do not
1220 # and is not an exactmatcher. This is acceptable; we do not
1222 # make any hard assumptions about file-or-directory below
1221 # make any hard assumptions about file-or-directory below
1223 # based on the presence of `f` in visitentries. If
1222 # based on the presence of `f` in visitentries. If
1224 # visitchildrenset returned a set, we can always skip the
1223 # visitchildrenset returned a set, we can always skip the
1225 # entries *not* in the set it provided regardless of whether
1224 # entries *not* in the set it provided regardless of whether
1226 # they're actually a file or a directory.
1225 # they're actually a file or a directory.
1227 if visitentries and f not in visitentries:
1226 if visitentries and f not in visitentries:
1228 continue
1227 continue
1229 if normalizefile:
1228 if normalizefile:
1230 # even though f might be a directory, we're only
1229 # even though f might be a directory, we're only
1231 # interested in comparing it to files currently in the
1230 # interested in comparing it to files currently in the
1232 # dmap -- therefore normalizefile is enough
1231 # dmap -- therefore normalizefile is enough
1233 nf = normalizefile(
1232 nf = normalizefile(
1234 nd and (nd + b"/" + f) or f, True, True
1233 nd and (nd + b"/" + f) or f, True, True
1235 )
1234 )
1236 else:
1235 else:
1237 nf = nd and (nd + b"/" + f) or f
1236 nf = nd and (nd + b"/" + f) or f
1238 if nf not in results:
1237 if nf not in results:
1239 if kind == dirkind:
1238 if kind == dirkind:
1240 if not ignore(nf):
1239 if not ignore(nf):
1241 if matchtdir:
1240 if matchtdir:
1242 matchtdir(nf)
1241 matchtdir(nf)
1243 wadd(nf)
1242 wadd(nf)
1244 if nf in dmap and (matchalways or matchfn(nf)):
1243 if nf in dmap and (matchalways or matchfn(nf)):
1245 results[nf] = None
1244 results[nf] = None
1246 elif kind == regkind or kind == lnkkind:
1245 elif kind == regkind or kind == lnkkind:
1247 if nf in dmap:
1246 if nf in dmap:
1248 if matchalways or matchfn(nf):
1247 if matchalways or matchfn(nf):
1249 results[nf] = st
1248 results[nf] = st
1250 elif (matchalways or matchfn(nf)) and not ignore(
1249 elif (matchalways or matchfn(nf)) and not ignore(
1251 nf
1250 nf
1252 ):
1251 ):
1253 # unknown file -- normalize if necessary
1252 # unknown file -- normalize if necessary
1254 if not alreadynormed:
1253 if not alreadynormed:
1255 nf = normalize(nf, False, True)
1254 nf = normalize(nf, False, True)
1256 results[nf] = st
1255 results[nf] = st
1257 elif nf in dmap and (matchalways or matchfn(nf)):
1256 elif nf in dmap and (matchalways or matchfn(nf)):
1258 results[nf] = None
1257 results[nf] = None
1259
1258
1260 for nd, d in work:
1259 for nd, d in work:
1261 # alreadynormed means that processwork doesn't have to do any
1260 # alreadynormed means that processwork doesn't have to do any
1262 # expensive directory normalization
1261 # expensive directory normalization
1263 alreadynormed = not normalize or nd == d
1262 alreadynormed = not normalize or nd == d
1264 traverse([d], alreadynormed)
1263 traverse([d], alreadynormed)
1265
1264
1266 for s in subrepos:
1265 for s in subrepos:
1267 del results[s]
1266 del results[s]
1268 del results[b'.hg']
1267 del results[b'.hg']
1269
1268
1270 # step 3: visit remaining files from dmap
1269 # step 3: visit remaining files from dmap
1271 if not skipstep3 and not exact:
1270 if not skipstep3 and not exact:
1272 # If a dmap file is not in results yet, it was either
1271 # If a dmap file is not in results yet, it was either
1273 # a) not matching matchfn b) ignored, c) missing, or d) under a
1272 # a) not matching matchfn b) ignored, c) missing, or d) under a
1274 # symlink directory.
1273 # symlink directory.
1275 if not results and matchalways:
1274 if not results and matchalways:
1276 visit = [f for f in dmap]
1275 visit = [f for f in dmap]
1277 else:
1276 else:
1278 visit = [f for f in dmap if f not in results and matchfn(f)]
1277 visit = [f for f in dmap if f not in results and matchfn(f)]
1279 visit.sort()
1278 visit.sort()
1280
1279
1281 if unknown:
1280 if unknown:
1282 # unknown == True means we walked all dirs under the roots
1281 # unknown == True means we walked all dirs under the roots
1283 # that wasn't ignored, and everything that matched was stat'ed
1282 # that wasn't ignored, and everything that matched was stat'ed
1284 # and is already in results.
1283 # and is already in results.
1285 # The rest must thus be ignored or under a symlink.
1284 # The rest must thus be ignored or under a symlink.
1286 audit_path = pathutil.pathauditor(self._root, cached=True)
1285 audit_path = pathutil.pathauditor(self._root, cached=True)
1287
1286
1288 for nf in iter(visit):
1287 for nf in iter(visit):
1289 # If a stat for the same file was already added with a
1288 # If a stat for the same file was already added with a
1290 # different case, don't add one for this, since that would
1289 # different case, don't add one for this, since that would
1291 # make it appear as if the file exists under both names
1290 # make it appear as if the file exists under both names
1292 # on disk.
1291 # on disk.
1293 if (
1292 if (
1294 normalizefile
1293 normalizefile
1295 and normalizefile(nf, True, True) in results
1294 and normalizefile(nf, True, True) in results
1296 ):
1295 ):
1297 results[nf] = None
1296 results[nf] = None
1298 # Report ignored items in the dmap as long as they are not
1297 # Report ignored items in the dmap as long as they are not
1299 # under a symlink directory.
1298 # under a symlink directory.
1300 elif audit_path.check(nf):
1299 elif audit_path.check(nf):
1301 try:
1300 try:
1302 results[nf] = lstat(join(nf))
1301 results[nf] = lstat(join(nf))
1303 # file was just ignored, no links, and exists
1302 # file was just ignored, no links, and exists
1304 except OSError:
1303 except OSError:
1305 # file doesn't exist
1304 # file doesn't exist
1306 results[nf] = None
1305 results[nf] = None
1307 else:
1306 else:
1308 # It's either missing or under a symlink directory
1307 # It's either missing or under a symlink directory
1309 # which we in this case report as missing
1308 # which we in this case report as missing
1310 results[nf] = None
1309 results[nf] = None
1311 else:
1310 else:
1312 # We may not have walked the full directory tree above,
1311 # We may not have walked the full directory tree above,
1313 # so stat and check everything we missed.
1312 # so stat and check everything we missed.
1314 iv = iter(visit)
1313 iv = iter(visit)
1315 for st in util.statfiles([join(i) for i in visit]):
1314 for st in util.statfiles([join(i) for i in visit]):
1316 results[next(iv)] = st
1315 results[next(iv)] = st
1317 return results
1316 return results
1318
1317
1319 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1318 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1320 # Force Rayon (Rust parallelism library) to respect the number of
1319 # Force Rayon (Rust parallelism library) to respect the number of
1321 # workers. This is a temporary workaround until Rust code knows
1320 # workers. This is a temporary workaround until Rust code knows
1322 # how to read the config file.
1321 # how to read the config file.
1323 numcpus = self._ui.configint(b"worker", b"numcpus")
1322 numcpus = self._ui.configint(b"worker", b"numcpus")
1324 if numcpus is not None:
1323 if numcpus is not None:
1325 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1324 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1326
1325
1327 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1326 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1328 if not workers_enabled:
1327 if not workers_enabled:
1329 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1328 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1330
1329
1331 (
1330 (
1332 lookup,
1331 lookup,
1333 modified,
1332 modified,
1334 added,
1333 added,
1335 removed,
1334 removed,
1336 deleted,
1335 deleted,
1337 clean,
1336 clean,
1338 ignored,
1337 ignored,
1339 unknown,
1338 unknown,
1340 warnings,
1339 warnings,
1341 bad,
1340 bad,
1342 traversed,
1341 traversed,
1343 dirty,
1342 dirty,
1344 ) = rustmod.status(
1343 ) = rustmod.status(
1345 self._map._rustmap,
1344 self._map._rustmap,
1346 matcher,
1345 matcher,
1347 self._rootdir,
1346 self._rootdir,
1348 self._ignorefiles(),
1347 self._ignorefiles(),
1349 self._checkexec,
1348 self._checkexec,
1350 self._lastnormaltime,
1349 self._lastnormaltime,
1351 bool(list_clean),
1350 bool(list_clean),
1352 bool(list_ignored),
1351 bool(list_ignored),
1353 bool(list_unknown),
1352 bool(list_unknown),
1354 bool(matcher.traversedir),
1353 bool(matcher.traversedir),
1355 )
1354 )
1356
1355
1357 self._dirty |= dirty
1356 self._dirty |= dirty
1358
1357
1359 if matcher.traversedir:
1358 if matcher.traversedir:
1360 for dir in traversed:
1359 for dir in traversed:
1361 matcher.traversedir(dir)
1360 matcher.traversedir(dir)
1362
1361
1363 if self._ui.warn:
1362 if self._ui.warn:
1364 for item in warnings:
1363 for item in warnings:
1365 if isinstance(item, tuple):
1364 if isinstance(item, tuple):
1366 file_path, syntax = item
1365 file_path, syntax = item
1367 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1366 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1368 file_path,
1367 file_path,
1369 syntax,
1368 syntax,
1370 )
1369 )
1371 self._ui.warn(msg)
1370 self._ui.warn(msg)
1372 else:
1371 else:
1373 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1372 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1374 self._ui.warn(
1373 self._ui.warn(
1375 msg
1374 msg
1376 % (
1375 % (
1377 pathutil.canonpath(
1376 pathutil.canonpath(
1378 self._rootdir, self._rootdir, item
1377 self._rootdir, self._rootdir, item
1379 ),
1378 ),
1380 b"No such file or directory",
1379 b"No such file or directory",
1381 )
1380 )
1382 )
1381 )
1383
1382
1384 for (fn, message) in bad:
1383 for (fn, message) in bad:
1385 matcher.bad(fn, encoding.strtolocal(message))
1384 matcher.bad(fn, encoding.strtolocal(message))
1386
1385
1387 status = scmutil.status(
1386 status = scmutil.status(
1388 modified=modified,
1387 modified=modified,
1389 added=added,
1388 added=added,
1390 removed=removed,
1389 removed=removed,
1391 deleted=deleted,
1390 deleted=deleted,
1392 unknown=unknown,
1391 unknown=unknown,
1393 ignored=ignored,
1392 ignored=ignored,
1394 clean=clean,
1393 clean=clean,
1395 )
1394 )
1396 return (lookup, status)
1395 return (lookup, status)
1397
1396
1398 def status(self, match, subrepos, ignored, clean, unknown):
1397 def status(self, match, subrepos, ignored, clean, unknown):
1399 """Determine the status of the working copy relative to the
1398 """Determine the status of the working copy relative to the
1400 dirstate and return a pair of (unsure, status), where status is of type
1399 dirstate and return a pair of (unsure, status), where status is of type
1401 scmutil.status and:
1400 scmutil.status and:
1402
1401
1403 unsure:
1402 unsure:
1404 files that might have been modified since the dirstate was
1403 files that might have been modified since the dirstate was
1405 written, but need to be read to be sure (size is the same
1404 written, but need to be read to be sure (size is the same
1406 but mtime differs)
1405 but mtime differs)
1407 status.modified:
1406 status.modified:
1408 files that have definitely been modified since the dirstate
1407 files that have definitely been modified since the dirstate
1409 was written (different size or mode)
1408 was written (different size or mode)
1410 status.clean:
1409 status.clean:
1411 files that have definitely not been modified since the
1410 files that have definitely not been modified since the
1412 dirstate was written
1411 dirstate was written
1413 """
1412 """
1414 listignored, listclean, listunknown = ignored, clean, unknown
1413 listignored, listclean, listunknown = ignored, clean, unknown
1415 lookup, modified, added, unknown, ignored = [], [], [], [], []
1414 lookup, modified, added, unknown, ignored = [], [], [], [], []
1416 removed, deleted, clean = [], [], []
1415 removed, deleted, clean = [], [], []
1417
1416
1418 dmap = self._map
1417 dmap = self._map
1419 dmap.preload()
1418 dmap.preload()
1420
1419
1421 use_rust = True
1420 use_rust = True
1422
1421
1423 allowed_matchers = (
1422 allowed_matchers = (
1424 matchmod.alwaysmatcher,
1423 matchmod.alwaysmatcher,
1425 matchmod.exactmatcher,
1424 matchmod.exactmatcher,
1426 matchmod.includematcher,
1425 matchmod.includematcher,
1427 )
1426 )
1428
1427
1429 if rustmod is None:
1428 if rustmod is None:
1430 use_rust = False
1429 use_rust = False
1431 elif self._checkcase:
1430 elif self._checkcase:
1432 # Case-insensitive filesystems are not handled yet
1431 # Case-insensitive filesystems are not handled yet
1433 use_rust = False
1432 use_rust = False
1434 elif subrepos:
1433 elif subrepos:
1435 use_rust = False
1434 use_rust = False
1436 elif sparse.enabled:
1435 elif sparse.enabled:
1437 use_rust = False
1436 use_rust = False
1438 elif not isinstance(match, allowed_matchers):
1437 elif not isinstance(match, allowed_matchers):
1439 # Some matchers have yet to be implemented
1438 # Some matchers have yet to be implemented
1440 use_rust = False
1439 use_rust = False
1441
1440
1442 if use_rust:
1441 if use_rust:
1443 try:
1442 try:
1444 return self._rust_status(
1443 return self._rust_status(
1445 match, listclean, listignored, listunknown
1444 match, listclean, listignored, listunknown
1446 )
1445 )
1447 except rustmod.FallbackError:
1446 except rustmod.FallbackError:
1448 pass
1447 pass
1449
1448
1450 def noop(f):
1449 def noop(f):
1451 pass
1450 pass
1452
1451
1453 dcontains = dmap.__contains__
1452 dcontains = dmap.__contains__
1454 dget = dmap.__getitem__
1453 dget = dmap.__getitem__
1455 ladd = lookup.append # aka "unsure"
1454 ladd = lookup.append # aka "unsure"
1456 madd = modified.append
1455 madd = modified.append
1457 aadd = added.append
1456 aadd = added.append
1458 uadd = unknown.append if listunknown else noop
1457 uadd = unknown.append if listunknown else noop
1459 iadd = ignored.append if listignored else noop
1458 iadd = ignored.append if listignored else noop
1460 radd = removed.append
1459 radd = removed.append
1461 dadd = deleted.append
1460 dadd = deleted.append
1462 cadd = clean.append if listclean else noop
1461 cadd = clean.append if listclean else noop
1463 mexact = match.exact
1462 mexact = match.exact
1464 dirignore = self._dirignore
1463 dirignore = self._dirignore
1465 checkexec = self._checkexec
1464 checkexec = self._checkexec
1466 copymap = self._map.copymap
1465 copymap = self._map.copymap
1467 lastnormaltime = self._lastnormaltime
1466 lastnormaltime = self._lastnormaltime
1468
1467
1469 # We need to do full walks when either
1468 # We need to do full walks when either
1470 # - we're listing all clean files, or
1469 # - we're listing all clean files, or
1471 # - match.traversedir does something, because match.traversedir should
1470 # - match.traversedir does something, because match.traversedir should
1472 # be called for every dir in the working dir
1471 # be called for every dir in the working dir
1473 full = listclean or match.traversedir is not None
1472 full = listclean or match.traversedir is not None
1474 for fn, st in pycompat.iteritems(
1473 for fn, st in pycompat.iteritems(
1475 self.walk(match, subrepos, listunknown, listignored, full=full)
1474 self.walk(match, subrepos, listunknown, listignored, full=full)
1476 ):
1475 ):
1477 if not dcontains(fn):
1476 if not dcontains(fn):
1478 if (listignored or mexact(fn)) and dirignore(fn):
1477 if (listignored or mexact(fn)) and dirignore(fn):
1479 if listignored:
1478 if listignored:
1480 iadd(fn)
1479 iadd(fn)
1481 else:
1480 else:
1482 uadd(fn)
1481 uadd(fn)
1483 continue
1482 continue
1484
1483
1485 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1484 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1486 # written like that for performance reasons. dmap[fn] is not a
1485 # written like that for performance reasons. dmap[fn] is not a
1487 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1486 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1488 # opcode has fast paths when the value to be unpacked is a tuple or
1487 # opcode has fast paths when the value to be unpacked is a tuple or
1489 # a list, but falls back to creating a full-fledged iterator in
1488 # a list, but falls back to creating a full-fledged iterator in
1490 # general. That is much slower than simply accessing and storing the
1489 # general. That is much slower than simply accessing and storing the
1491 # tuple members one by one.
1490 # tuple members one by one.
1492 t = dget(fn)
1491 t = dget(fn)
1493 mode = t.mode
1492 mode = t.mode
1494 size = t.size
1493 size = t.size
1495 time = t.mtime
1494 time = t.mtime
1496
1495
1497 if not st and t.tracked:
1496 if not st and t.tracked:
1498 dadd(fn)
1497 dadd(fn)
1499 elif t.merged:
1498 elif t.merged:
1500 madd(fn)
1499 madd(fn)
1501 elif t.added:
1500 elif t.added:
1502 aadd(fn)
1501 aadd(fn)
1503 elif t.removed:
1502 elif t.removed:
1504 radd(fn)
1503 radd(fn)
1505 elif t.tracked:
1504 elif t.tracked:
1506 if (
1505 if (
1507 size >= 0
1506 size >= 0
1508 and (
1507 and (
1509 (size != st.st_size and size != st.st_size & _rangemask)
1508 (size != st.st_size and size != st.st_size & _rangemask)
1510 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1509 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1511 )
1510 )
1512 or t.from_p2
1511 or t.from_p2
1513 or fn in copymap
1512 or fn in copymap
1514 ):
1513 ):
1515 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1514 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1516 # issue6456: Size returned may be longer due to
1515 # issue6456: Size returned may be longer due to
1517 # encryption on EXT-4 fscrypt, undecided.
1516 # encryption on EXT-4 fscrypt, undecided.
1518 ladd(fn)
1517 ladd(fn)
1519 else:
1518 else:
1520 madd(fn)
1519 madd(fn)
1521 elif (
1520 elif (
1522 time != st[stat.ST_MTIME]
1521 time != st[stat.ST_MTIME]
1523 and time != st[stat.ST_MTIME] & _rangemask
1522 and time != st[stat.ST_MTIME] & _rangemask
1524 ):
1523 ):
1525 ladd(fn)
1524 ladd(fn)
1526 elif st[stat.ST_MTIME] == lastnormaltime:
1525 elif st[stat.ST_MTIME] == lastnormaltime:
1527 # fn may have just been marked as normal and it may have
1526 # fn may have just been marked as normal and it may have
1528 # changed in the same second without changing its size.
1527 # changed in the same second without changing its size.
1529 # This can happen if we quickly do multiple commits.
1528 # This can happen if we quickly do multiple commits.
1530 # Force lookup, so we don't miss such a racy file change.
1529 # Force lookup, so we don't miss such a racy file change.
1531 ladd(fn)
1530 ladd(fn)
1532 elif listclean:
1531 elif listclean:
1533 cadd(fn)
1532 cadd(fn)
1534 status = scmutil.status(
1533 status = scmutil.status(
1535 modified, added, removed, deleted, unknown, ignored, clean
1534 modified, added, removed, deleted, unknown, ignored, clean
1536 )
1535 )
1537 return (lookup, status)
1536 return (lookup, status)
1538
1537
1539 def matches(self, match):
1538 def matches(self, match):
1540 """
1539 """
1541 return files in the dirstate (in whatever state) filtered by match
1540 return files in the dirstate (in whatever state) filtered by match
1542 """
1541 """
1543 dmap = self._map
1542 dmap = self._map
1544 if rustmod is not None:
1543 if rustmod is not None:
1545 dmap = self._map._rustmap
1544 dmap = self._map._rustmap
1546
1545
1547 if match.always():
1546 if match.always():
1548 return dmap.keys()
1547 return dmap.keys()
1549 files = match.files()
1548 files = match.files()
1550 if match.isexact():
1549 if match.isexact():
1551 # fast path -- filter the other way around, since typically files is
1550 # fast path -- filter the other way around, since typically files is
1552 # much smaller than dmap
1551 # much smaller than dmap
1553 return [f for f in files if f in dmap]
1552 return [f for f in files if f in dmap]
1554 if match.prefix() and all(fn in dmap for fn in files):
1553 if match.prefix() and all(fn in dmap for fn in files):
1555 # fast path -- all the values are known to be files, so just return
1554 # fast path -- all the values are known to be files, so just return
1556 # that
1555 # that
1557 return list(files)
1556 return list(files)
1558 return [f for f in dmap if match(f)]
1557 return [f for f in dmap if match(f)]
1559
1558
1560 def _actualfilename(self, tr):
1559 def _actualfilename(self, tr):
1561 if tr:
1560 if tr:
1562 return self._pendingfilename
1561 return self._pendingfilename
1563 else:
1562 else:
1564 return self._filename
1563 return self._filename
1565
1564
1566 def savebackup(self, tr, backupname):
1565 def savebackup(self, tr, backupname):
1567 '''Save current dirstate into backup file'''
1566 '''Save current dirstate into backup file'''
1568 filename = self._actualfilename(tr)
1567 filename = self._actualfilename(tr)
1569 assert backupname != filename
1568 assert backupname != filename
1570
1569
1571 # use '_writedirstate' instead of 'write' to write changes certainly,
1570 # use '_writedirstate' instead of 'write' to write changes certainly,
1572 # because the latter omits writing out if transaction is running.
1571 # because the latter omits writing out if transaction is running.
1573 # output file will be used to create backup of dirstate at this point.
1572 # output file will be used to create backup of dirstate at this point.
1574 if self._dirty or not self._opener.exists(filename):
1573 if self._dirty or not self._opener.exists(filename):
1575 self._writedirstate(
1574 self._writedirstate(
1576 tr,
1575 tr,
1577 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1576 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1578 )
1577 )
1579
1578
1580 if tr:
1579 if tr:
1581 # ensure that subsequent tr.writepending returns True for
1580 # ensure that subsequent tr.writepending returns True for
1582 # changes written out above, even if dirstate is never
1581 # changes written out above, even if dirstate is never
1583 # changed after this
1582 # changed after this
1584 tr.addfilegenerator(
1583 tr.addfilegenerator(
1585 b'dirstate',
1584 b'dirstate',
1586 (self._filename,),
1585 (self._filename,),
1587 lambda f: self._writedirstate(tr, f),
1586 lambda f: self._writedirstate(tr, f),
1588 location=b'plain',
1587 location=b'plain',
1589 )
1588 )
1590
1589
1591 # ensure that pending file written above is unlinked at
1590 # ensure that pending file written above is unlinked at
1592 # failure, even if tr.writepending isn't invoked until the
1591 # failure, even if tr.writepending isn't invoked until the
1593 # end of this transaction
1592 # end of this transaction
1594 tr.registertmp(filename, location=b'plain')
1593 tr.registertmp(filename, location=b'plain')
1595
1594
1596 self._opener.tryunlink(backupname)
1595 self._opener.tryunlink(backupname)
1597 # hardlink backup is okay because _writedirstate is always called
1596 # hardlink backup is okay because _writedirstate is always called
1598 # with an "atomictemp=True" file.
1597 # with an "atomictemp=True" file.
1599 util.copyfile(
1598 util.copyfile(
1600 self._opener.join(filename),
1599 self._opener.join(filename),
1601 self._opener.join(backupname),
1600 self._opener.join(backupname),
1602 hardlink=True,
1601 hardlink=True,
1603 )
1602 )
1604
1603
1605 def restorebackup(self, tr, backupname):
1604 def restorebackup(self, tr, backupname):
1606 '''Restore dirstate by backup file'''
1605 '''Restore dirstate by backup file'''
1607 # this "invalidate()" prevents "wlock.release()" from writing
1606 # this "invalidate()" prevents "wlock.release()" from writing
1608 # changes of dirstate out after restoring from backup file
1607 # changes of dirstate out after restoring from backup file
1609 self.invalidate()
1608 self.invalidate()
1610 filename = self._actualfilename(tr)
1609 filename = self._actualfilename(tr)
1611 o = self._opener
1610 o = self._opener
1612 if util.samefile(o.join(backupname), o.join(filename)):
1611 if util.samefile(o.join(backupname), o.join(filename)):
1613 o.unlink(backupname)
1612 o.unlink(backupname)
1614 else:
1613 else:
1615 o.rename(backupname, filename, checkambig=True)
1614 o.rename(backupname, filename, checkambig=True)
1616
1615
1617 def clearbackup(self, tr, backupname):
1616 def clearbackup(self, tr, backupname):
1618 '''Clear backup file'''
1617 '''Clear backup file'''
1619 self._opener.unlink(backupname)
1618 self._opener.unlink(backupname)
@@ -1,910 +1,912
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 from .dirstateutils import (
21 from .dirstateutils import (
22 docket as docketmod,
22 docket as docketmod,
23 )
23 )
24
24
25 parsers = policy.importmod('parsers')
25 parsers = policy.importmod('parsers')
26 rustmod = policy.importrust('dirstate')
26 rustmod = policy.importrust('dirstate')
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29
29
30 DirstateItem = parsers.DirstateItem
30 DirstateItem = parsers.DirstateItem
31
31
32 rangemask = 0x7FFFFFFF
32 rangemask = 0x7FFFFFFF
33
33
34
34
35 class dirstatemap(object):
35 class dirstatemap(object):
36 """Map encapsulating the dirstate's contents.
36 """Map encapsulating the dirstate's contents.
37
37
38 The dirstate contains the following state:
38 The dirstate contains the following state:
39
39
40 - `identity` is the identity of the dirstate file, which can be used to
40 - `identity` is the identity of the dirstate file, which can be used to
41 detect when changes have occurred to the dirstate file.
41 detect when changes have occurred to the dirstate file.
42
42
43 - `parents` is a pair containing the parents of the working copy. The
43 - `parents` is a pair containing the parents of the working copy. The
44 parents are updated by calling `setparents`.
44 parents are updated by calling `setparents`.
45
45
46 - the state map maps filenames to tuples of (state, mode, size, mtime),
46 - the state map maps filenames to tuples of (state, mode, size, mtime),
47 where state is a single character representing 'normal', 'added',
47 where state is a single character representing 'normal', 'added',
48 'removed', or 'merged'. It is read by treating the dirstate as a
48 'removed', or 'merged'. It is read by treating the dirstate as a
49 dict. File state is updated by calling the `addfile`, `removefile` and
49 dict. File state is updated by calling the `addfile`, `removefile` and
50 `dropfile` methods.
50 `dropfile` methods.
51
51
52 - `copymap` maps destination filenames to their source filename.
52 - `copymap` maps destination filenames to their source filename.
53
53
54 The dirstate also provides the following views onto the state:
54 The dirstate also provides the following views onto the state:
55
55
56 - `nonnormalset` is a set of the filenames that have state other
56 - `nonnormalset` is a set of the filenames that have state other
57 than 'normal', or are normal but have an mtime of -1 ('normallookup').
57 than 'normal', or are normal but have an mtime of -1 ('normallookup').
58
58
59 - `otherparentset` is a set of the filenames that are marked as coming
59 - `otherparentset` is a set of the filenames that are marked as coming
60 from the second parent when the dirstate is currently being merged.
60 from the second parent when the dirstate is currently being merged.
61
61
62 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
62 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
63 form that they appear as in the dirstate.
63 form that they appear as in the dirstate.
64
64
65 - `dirfoldmap` is a dict mapping normalized directory names to the
65 - `dirfoldmap` is a dict mapping normalized directory names to the
66 denormalized form that they appear as in the dirstate.
66 denormalized form that they appear as in the dirstate.
67 """
67 """
68
68
69 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
69 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
70 self._ui = ui
70 self._ui = ui
71 self._opener = opener
71 self._opener = opener
72 self._root = root
72 self._root = root
73 self._filename = b'dirstate'
73 self._filename = b'dirstate'
74 self._nodelen = 20
74 self._nodelen = 20
75 self._nodeconstants = nodeconstants
75 self._nodeconstants = nodeconstants
76 assert (
76 assert (
77 not use_dirstate_v2
77 not use_dirstate_v2
78 ), "should have detected unsupported requirement"
78 ), "should have detected unsupported requirement"
79
79
80 self._parents = None
80 self._parents = None
81 self._dirtyparents = False
81 self._dirtyparents = False
82
82
83 # for consistent view between _pl() and _read() invocations
83 # for consistent view between _pl() and _read() invocations
84 self._pendingmode = None
84 self._pendingmode = None
85
85
86 @propertycache
86 @propertycache
87 def _map(self):
87 def _map(self):
88 self._map = {}
88 self._map = {}
89 self.read()
89 self.read()
90 return self._map
90 return self._map
91
91
92 @propertycache
92 @propertycache
93 def copymap(self):
93 def copymap(self):
94 self.copymap = {}
94 self.copymap = {}
95 self._map
95 self._map
96 return self.copymap
96 return self.copymap
97
97
98 def clear(self):
98 def clear(self):
99 self._map.clear()
99 self._map.clear()
100 self.copymap.clear()
100 self.copymap.clear()
101 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
101 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
102 util.clearcachedproperty(self, b"_dirs")
102 util.clearcachedproperty(self, b"_dirs")
103 util.clearcachedproperty(self, b"_alldirs")
103 util.clearcachedproperty(self, b"_alldirs")
104 util.clearcachedproperty(self, b"filefoldmap")
104 util.clearcachedproperty(self, b"filefoldmap")
105 util.clearcachedproperty(self, b"dirfoldmap")
105 util.clearcachedproperty(self, b"dirfoldmap")
106 util.clearcachedproperty(self, b"nonnormalset")
106 util.clearcachedproperty(self, b"nonnormalset")
107 util.clearcachedproperty(self, b"otherparentset")
107 util.clearcachedproperty(self, b"otherparentset")
108
108
109 def items(self):
109 def items(self):
110 return pycompat.iteritems(self._map)
110 return pycompat.iteritems(self._map)
111
111
112 # forward for python2,3 compat
112 # forward for python2,3 compat
113 iteritems = items
113 iteritems = items
114
114
115 debug_iter = items
115 debug_iter = items
116
116
117 def __len__(self):
117 def __len__(self):
118 return len(self._map)
118 return len(self._map)
119
119
120 def __iter__(self):
120 def __iter__(self):
121 return iter(self._map)
121 return iter(self._map)
122
122
123 def get(self, key, default=None):
123 def get(self, key, default=None):
124 return self._map.get(key, default)
124 return self._map.get(key, default)
125
125
126 def __contains__(self, key):
126 def __contains__(self, key):
127 return key in self._map
127 return key in self._map
128
128
129 def __getitem__(self, key):
129 def __getitem__(self, key):
130 return self._map[key]
130 return self._map[key]
131
131
132 def keys(self):
132 def keys(self):
133 return self._map.keys()
133 return self._map.keys()
134
134
135 def preload(self):
135 def preload(self):
136 """Loads the underlying data, if it's not already loaded"""
136 """Loads the underlying data, if it's not already loaded"""
137 self._map
137 self._map
138
138
139 def _dirs_incr(self, filename, old_entry=None):
139 def _dirs_incr(self, filename, old_entry=None):
140 """incremente the dirstate counter if applicable"""
140 """incremente the dirstate counter if applicable"""
141 if (
141 if (
142 old_entry is None or old_entry.removed
142 old_entry is None or old_entry.removed
143 ) and "_dirs" in self.__dict__:
143 ) and "_dirs" in self.__dict__:
144 self._dirs.addpath(filename)
144 self._dirs.addpath(filename)
145 if old_entry is None and "_alldirs" in self.__dict__:
145 if old_entry is None and "_alldirs" in self.__dict__:
146 self._alldirs.addpath(filename)
146 self._alldirs.addpath(filename)
147
147
148 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
148 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
149 """decremente the dirstate counter if applicable"""
149 """decremente the dirstate counter if applicable"""
150 if old_entry is not None:
150 if old_entry is not None:
151 if "_dirs" in self.__dict__ and not old_entry.removed:
151 if "_dirs" in self.__dict__ and not old_entry.removed:
152 self._dirs.delpath(filename)
152 self._dirs.delpath(filename)
153 if "_alldirs" in self.__dict__ and not remove_variant:
153 if "_alldirs" in self.__dict__ and not remove_variant:
154 self._alldirs.delpath(filename)
154 self._alldirs.delpath(filename)
155 elif remove_variant and "_alldirs" in self.__dict__:
155 elif remove_variant and "_alldirs" in self.__dict__:
156 self._alldirs.addpath(filename)
156 self._alldirs.addpath(filename)
157 if "filefoldmap" in self.__dict__:
157 if "filefoldmap" in self.__dict__:
158 normed = util.normcase(filename)
158 normed = util.normcase(filename)
159 self.filefoldmap.pop(normed, None)
159 self.filefoldmap.pop(normed, None)
160
160
161 def set_possibly_dirty(self, filename):
161 def set_possibly_dirty(self, filename):
162 """record that the current state of the file on disk is unknown"""
162 """record that the current state of the file on disk is unknown"""
163 self[filename].set_possibly_dirty()
163 self[filename].set_possibly_dirty()
164
164
165 def addfile(
165 def addfile(
166 self,
166 self,
167 f,
167 f,
168 mode=0,
168 mode=0,
169 size=None,
169 size=None,
170 mtime=None,
170 mtime=None,
171 added=False,
171 added=False,
172 merged=False,
172 merged=False,
173 from_p2=False,
173 from_p2=False,
174 possibly_dirty=False,
174 possibly_dirty=False,
175 ):
175 ):
176 """Add a tracked file to the dirstate."""
176 """Add a tracked file to the dirstate."""
177 if added:
177 if added:
178 assert not merged
178 assert not merged
179 assert not possibly_dirty
179 assert not possibly_dirty
180 assert not from_p2
180 assert not from_p2
181 new_entry = DirstateItem.new_added()
181 new_entry = DirstateItem.new_added()
182 elif merged:
182 elif merged:
183 assert not possibly_dirty
183 assert not possibly_dirty
184 assert not from_p2
184 assert not from_p2
185 new_entry = DirstateItem.new_merged()
185 new_entry = DirstateItem.new_merged()
186 elif from_p2:
186 elif from_p2:
187 assert not possibly_dirty
187 assert not possibly_dirty
188 new_entry = DirstateItem.new_from_p2()
188 new_entry = DirstateItem.new_from_p2()
189 elif possibly_dirty:
189 elif possibly_dirty:
190 new_entry = DirstateItem.new_possibly_dirty()
190 new_entry = DirstateItem.new_possibly_dirty()
191 else:
191 else:
192 assert size is not None
192 assert size is not None
193 assert mtime is not None
193 assert mtime is not None
194 size = size & rangemask
194 size = size & rangemask
195 mtime = mtime & rangemask
195 mtime = mtime & rangemask
196 new_entry = DirstateItem.new_normal(mode, size, mtime)
196 new_entry = DirstateItem.new_normal(mode, size, mtime)
197 old_entry = self.get(f)
197 old_entry = self.get(f)
198 self._dirs_incr(f, old_entry)
198 self._dirs_incr(f, old_entry)
199 self._map[f] = new_entry
199 self._map[f] = new_entry
200 if new_entry.dm_nonnormal:
200 if new_entry.dm_nonnormal:
201 self.nonnormalset.add(f)
201 self.nonnormalset.add(f)
202 else:
202 else:
203 self.nonnormalset.discard(f)
203 self.nonnormalset.discard(f)
204 if new_entry.dm_otherparent:
204 if new_entry.dm_otherparent:
205 self.otherparentset.add(f)
205 self.otherparentset.add(f)
206 else:
206 else:
207 self.otherparentset.discard(f)
207 self.otherparentset.discard(f)
208
208
209 def reset_state(
209 def reset_state(
210 self,
210 self,
211 filename,
211 filename,
212 wc_tracked,
212 wc_tracked,
213 p1_tracked,
213 p1_tracked,
214 p2_tracked=False,
214 p2_tracked=False,
215 merged=False,
215 merged=False,
216 clean_p1=False,
216 clean_p1=False,
217 clean_p2=False,
217 clean_p2=False,
218 possibly_dirty=False,
218 possibly_dirty=False,
219 parentfiledata=None,
219 parentfiledata=None,
220 ):
220 ):
221 """Set a entry to a given state, diregarding all previous state
221 """Set a entry to a given state, diregarding all previous state
222
222
223 This is to be used by the part of the dirstate API dedicated to
223 This is to be used by the part of the dirstate API dedicated to
224 adjusting the dirstate after a update/merge.
224 adjusting the dirstate after a update/merge.
225
225
226 note: calling this might result to no entry existing at all if the
226 note: calling this might result to no entry existing at all if the
227 dirstate map does not see any point at having one for this file
227 dirstate map does not see any point at having one for this file
228 anymore.
228 anymore.
229 """
229 """
230 if merged and (clean_p1 or clean_p2):
230 if merged and (clean_p1 or clean_p2):
231 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
231 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
232 raise error.ProgrammingError(msg)
232 raise error.ProgrammingError(msg)
233 # copy information are now outdated
233 # copy information are now outdated
234 # (maybe new information should be in directly passed to this function)
234 # (maybe new information should be in directly passed to this function)
235 self.copymap.pop(filename, None)
235 self.copymap.pop(filename, None)
236
236
237 if not (p1_tracked or p2_tracked or wc_tracked):
237 if not (p1_tracked or p2_tracked or wc_tracked):
238 self.dropfile(filename)
238 self.dropfile(filename)
239 return
239 return
240 elif merged:
240 elif merged:
241 # XXX might be merged and removed ?
241 # XXX might be merged and removed ?
242 entry = self.get(filename)
242 entry = self.get(filename)
243 if entry is None or not entry.tracked:
243 if entry is None or not entry.tracked:
244 # XXX mostly replicate dirstate.other parent. We should get
244 # XXX mostly replicate dirstate.other parent. We should get
245 # the higher layer to pass us more reliable data where `merged`
245 # the higher layer to pass us more reliable data where `merged`
246 # actually mean merged. Dropping this clause will show failure
246 # actually mean merged. Dropping this clause will show failure
247 # in `test-graft.t`
247 # in `test-graft.t`
248 merged = False
248 merged = False
249 clean_p2 = True
249 clean_p2 = True
250 elif not (p1_tracked or p2_tracked) and wc_tracked:
250 elif not (p1_tracked or p2_tracked) and wc_tracked:
251 pass # file is added, nothing special to adjust
251 pass # file is added, nothing special to adjust
252 elif (p1_tracked or p2_tracked) and not wc_tracked:
252 elif (p1_tracked or p2_tracked) and not wc_tracked:
253 pass
253 pass
254 elif clean_p2 and wc_tracked:
254 elif clean_p2 and wc_tracked:
255 if p1_tracked or self.get(filename) is not None:
255 if p1_tracked or self.get(filename) is not None:
256 # XXX the `self.get` call is catching some case in
256 # XXX the `self.get` call is catching some case in
257 # `test-merge-remove.t` where the file is tracked in p1, the
257 # `test-merge-remove.t` where the file is tracked in p1, the
258 # p1_tracked argument is False.
258 # p1_tracked argument is False.
259 #
259 #
260 # In addition, this seems to be a case where the file is marked
260 # In addition, this seems to be a case where the file is marked
261 # as merged without actually being the result of a merge
261 # as merged without actually being the result of a merge
262 # action. So thing are not ideal here.
262 # action. So thing are not ideal here.
263 merged = True
263 merged = True
264 clean_p2 = False
264 clean_p2 = False
265 elif not p1_tracked and p2_tracked and wc_tracked:
265 elif not p1_tracked and p2_tracked and wc_tracked:
266 clean_p2 = True
266 clean_p2 = True
267 elif possibly_dirty:
267 elif possibly_dirty:
268 pass
268 pass
269 elif wc_tracked:
269 elif wc_tracked:
270 # this is a "normal" file
270 # this is a "normal" file
271 if parentfiledata is None:
271 if parentfiledata is None:
272 msg = b'failed to pass parentfiledata for a normal file: %s'
272 msg = b'failed to pass parentfiledata for a normal file: %s'
273 msg %= filename
273 msg %= filename
274 raise error.ProgrammingError(msg)
274 raise error.ProgrammingError(msg)
275 else:
275 else:
276 assert False, 'unreachable'
276 assert False, 'unreachable'
277
277
278 old_entry = self._map.get(filename)
278 old_entry = self._map.get(filename)
279 self._dirs_incr(filename, old_entry)
279 self._dirs_incr(filename, old_entry)
280 entry = DirstateItem(
280 entry = DirstateItem(
281 wc_tracked=wc_tracked,
281 wc_tracked=wc_tracked,
282 p1_tracked=p1_tracked,
282 p1_tracked=p1_tracked,
283 p2_tracked=p2_tracked,
283 p2_tracked=p2_tracked,
284 merged=merged,
284 merged=merged,
285 clean_p1=clean_p1,
285 clean_p1=clean_p1,
286 clean_p2=clean_p2,
286 clean_p2=clean_p2,
287 possibly_dirty=possibly_dirty,
287 possibly_dirty=possibly_dirty,
288 parentfiledata=parentfiledata,
288 parentfiledata=parentfiledata,
289 )
289 )
290 if entry.dm_nonnormal:
290 if entry.dm_nonnormal:
291 self.nonnormalset.add(filename)
291 self.nonnormalset.add(filename)
292 else:
292 else:
293 self.nonnormalset.discard(filename)
293 self.nonnormalset.discard(filename)
294 if entry.dm_otherparent:
294 if entry.dm_otherparent:
295 self.otherparentset.add(filename)
295 self.otherparentset.add(filename)
296 else:
296 else:
297 self.otherparentset.discard(filename)
297 self.otherparentset.discard(filename)
298 self._map[filename] = entry
298 self._map[filename] = entry
299
299
300 def set_untracked(self, f):
300 def set_untracked(self, f):
301 """Mark a file as no longer tracked in the dirstate map"""
301 """Mark a file as no longer tracked in the dirstate map"""
302 entry = self[f]
302 entry = self[f]
303 self._dirs_decr(f, old_entry=entry, remove_variant=True)
303 self._dirs_decr(f, old_entry=entry, remove_variant=True)
304 if entry.from_p2:
304 if entry.from_p2:
305 self.otherparentset.add(f)
305 self.otherparentset.add(f)
306 elif not entry.merged:
306 elif not entry.merged:
307 self.copymap.pop(f, None)
307 self.copymap.pop(f, None)
308 entry.set_untracked()
308 entry.set_untracked()
309 self.nonnormalset.add(f)
309 self.nonnormalset.add(f)
310
310
311 def dropfile(self, f):
311 def dropfile(self, f):
312 """
312 """
313 Remove a file from the dirstate. Returns True if the file was
313 Remove a file from the dirstate. Returns True if the file was
314 previously recorded.
314 previously recorded.
315 """
315 """
316 old_entry = self._map.pop(f, None)
316 old_entry = self._map.pop(f, None)
317 self._dirs_decr(f, old_entry=old_entry)
317 self._dirs_decr(f, old_entry=old_entry)
318 self.nonnormalset.discard(f)
318 self.nonnormalset.discard(f)
319 self.copymap.pop(f, None)
319 return old_entry is not None
320 return old_entry is not None
320
321
321 def clearambiguoustimes(self, files, now):
322 def clearambiguoustimes(self, files, now):
322 for f in files:
323 for f in files:
323 e = self.get(f)
324 e = self.get(f)
324 if e is not None and e.need_delay(now):
325 if e is not None and e.need_delay(now):
325 e.set_possibly_dirty()
326 e.set_possibly_dirty()
326 self.nonnormalset.add(f)
327 self.nonnormalset.add(f)
327
328
328 def nonnormalentries(self):
329 def nonnormalentries(self):
329 '''Compute the nonnormal dirstate entries from the dmap'''
330 '''Compute the nonnormal dirstate entries from the dmap'''
330 try:
331 try:
331 return parsers.nonnormalotherparententries(self._map)
332 return parsers.nonnormalotherparententries(self._map)
332 except AttributeError:
333 except AttributeError:
333 nonnorm = set()
334 nonnorm = set()
334 otherparent = set()
335 otherparent = set()
335 for fname, e in pycompat.iteritems(self._map):
336 for fname, e in pycompat.iteritems(self._map):
336 if e.dm_nonnormal:
337 if e.dm_nonnormal:
337 nonnorm.add(fname)
338 nonnorm.add(fname)
338 if e.from_p2:
339 if e.from_p2:
339 otherparent.add(fname)
340 otherparent.add(fname)
340 return nonnorm, otherparent
341 return nonnorm, otherparent
341
342
342 @propertycache
343 @propertycache
343 def filefoldmap(self):
344 def filefoldmap(self):
344 """Returns a dictionary mapping normalized case paths to their
345 """Returns a dictionary mapping normalized case paths to their
345 non-normalized versions.
346 non-normalized versions.
346 """
347 """
347 try:
348 try:
348 makefilefoldmap = parsers.make_file_foldmap
349 makefilefoldmap = parsers.make_file_foldmap
349 except AttributeError:
350 except AttributeError:
350 pass
351 pass
351 else:
352 else:
352 return makefilefoldmap(
353 return makefilefoldmap(
353 self._map, util.normcasespec, util.normcasefallback
354 self._map, util.normcasespec, util.normcasefallback
354 )
355 )
355
356
356 f = {}
357 f = {}
357 normcase = util.normcase
358 normcase = util.normcase
358 for name, s in pycompat.iteritems(self._map):
359 for name, s in pycompat.iteritems(self._map):
359 if not s.removed:
360 if not s.removed:
360 f[normcase(name)] = name
361 f[normcase(name)] = name
361 f[b'.'] = b'.' # prevents useless util.fspath() invocation
362 f[b'.'] = b'.' # prevents useless util.fspath() invocation
362 return f
363 return f
363
364
364 def hastrackeddir(self, d):
365 def hastrackeddir(self, d):
365 """
366 """
366 Returns True if the dirstate contains a tracked (not removed) file
367 Returns True if the dirstate contains a tracked (not removed) file
367 in this directory.
368 in this directory.
368 """
369 """
369 return d in self._dirs
370 return d in self._dirs
370
371
371 def hasdir(self, d):
372 def hasdir(self, d):
372 """
373 """
373 Returns True if the dirstate contains a file (tracked or removed)
374 Returns True if the dirstate contains a file (tracked or removed)
374 in this directory.
375 in this directory.
375 """
376 """
376 return d in self._alldirs
377 return d in self._alldirs
377
378
378 @propertycache
379 @propertycache
379 def _dirs(self):
380 def _dirs(self):
380 return pathutil.dirs(self._map, only_tracked=True)
381 return pathutil.dirs(self._map, only_tracked=True)
381
382
382 @propertycache
383 @propertycache
383 def _alldirs(self):
384 def _alldirs(self):
384 return pathutil.dirs(self._map)
385 return pathutil.dirs(self._map)
385
386
386 def _opendirstatefile(self):
387 def _opendirstatefile(self):
387 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
388 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
388 if self._pendingmode is not None and self._pendingmode != mode:
389 if self._pendingmode is not None and self._pendingmode != mode:
389 fp.close()
390 fp.close()
390 raise error.Abort(
391 raise error.Abort(
391 _(b'working directory state may be changed parallelly')
392 _(b'working directory state may be changed parallelly')
392 )
393 )
393 self._pendingmode = mode
394 self._pendingmode = mode
394 return fp
395 return fp
395
396
396 def parents(self):
397 def parents(self):
397 if not self._parents:
398 if not self._parents:
398 try:
399 try:
399 fp = self._opendirstatefile()
400 fp = self._opendirstatefile()
400 st = fp.read(2 * self._nodelen)
401 st = fp.read(2 * self._nodelen)
401 fp.close()
402 fp.close()
402 except IOError as err:
403 except IOError as err:
403 if err.errno != errno.ENOENT:
404 if err.errno != errno.ENOENT:
404 raise
405 raise
405 # File doesn't exist, so the current state is empty
406 # File doesn't exist, so the current state is empty
406 st = b''
407 st = b''
407
408
408 l = len(st)
409 l = len(st)
409 if l == self._nodelen * 2:
410 if l == self._nodelen * 2:
410 self._parents = (
411 self._parents = (
411 st[: self._nodelen],
412 st[: self._nodelen],
412 st[self._nodelen : 2 * self._nodelen],
413 st[self._nodelen : 2 * self._nodelen],
413 )
414 )
414 elif l == 0:
415 elif l == 0:
415 self._parents = (
416 self._parents = (
416 self._nodeconstants.nullid,
417 self._nodeconstants.nullid,
417 self._nodeconstants.nullid,
418 self._nodeconstants.nullid,
418 )
419 )
419 else:
420 else:
420 raise error.Abort(
421 raise error.Abort(
421 _(b'working directory state appears damaged!')
422 _(b'working directory state appears damaged!')
422 )
423 )
423
424
424 return self._parents
425 return self._parents
425
426
426 def setparents(self, p1, p2):
427 def setparents(self, p1, p2):
427 self._parents = (p1, p2)
428 self._parents = (p1, p2)
428 self._dirtyparents = True
429 self._dirtyparents = True
429
430
430 def read(self):
431 def read(self):
431 # ignore HG_PENDING because identity is used only for writing
432 # ignore HG_PENDING because identity is used only for writing
432 self.identity = util.filestat.frompath(
433 self.identity = util.filestat.frompath(
433 self._opener.join(self._filename)
434 self._opener.join(self._filename)
434 )
435 )
435
436
436 try:
437 try:
437 fp = self._opendirstatefile()
438 fp = self._opendirstatefile()
438 try:
439 try:
439 st = fp.read()
440 st = fp.read()
440 finally:
441 finally:
441 fp.close()
442 fp.close()
442 except IOError as err:
443 except IOError as err:
443 if err.errno != errno.ENOENT:
444 if err.errno != errno.ENOENT:
444 raise
445 raise
445 return
446 return
446 if not st:
447 if not st:
447 return
448 return
448
449
449 if util.safehasattr(parsers, b'dict_new_presized'):
450 if util.safehasattr(parsers, b'dict_new_presized'):
450 # Make an estimate of the number of files in the dirstate based on
451 # Make an estimate of the number of files in the dirstate based on
451 # its size. This trades wasting some memory for avoiding costly
452 # its size. This trades wasting some memory for avoiding costly
452 # resizes. Each entry have a prefix of 17 bytes followed by one or
453 # resizes. Each entry have a prefix of 17 bytes followed by one or
453 # two path names. Studies on various large-scale real-world repositories
454 # two path names. Studies on various large-scale real-world repositories
454 # found 54 bytes a reasonable upper limit for the average path names.
455 # found 54 bytes a reasonable upper limit for the average path names.
455 # Copy entries are ignored for the sake of this estimate.
456 # Copy entries are ignored for the sake of this estimate.
456 self._map = parsers.dict_new_presized(len(st) // 71)
457 self._map = parsers.dict_new_presized(len(st) // 71)
457
458
458 # Python's garbage collector triggers a GC each time a certain number
459 # Python's garbage collector triggers a GC each time a certain number
459 # of container objects (the number being defined by
460 # of container objects (the number being defined by
460 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
461 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
461 # for each file in the dirstate. The C version then immediately marks
462 # for each file in the dirstate. The C version then immediately marks
462 # them as not to be tracked by the collector. However, this has no
463 # them as not to be tracked by the collector. However, this has no
463 # effect on when GCs are triggered, only on what objects the GC looks
464 # effect on when GCs are triggered, only on what objects the GC looks
464 # into. This means that O(number of files) GCs are unavoidable.
465 # into. This means that O(number of files) GCs are unavoidable.
465 # Depending on when in the process's lifetime the dirstate is parsed,
466 # Depending on when in the process's lifetime the dirstate is parsed,
466 # this can get very expensive. As a workaround, disable GC while
467 # this can get very expensive. As a workaround, disable GC while
467 # parsing the dirstate.
468 # parsing the dirstate.
468 #
469 #
469 # (we cannot decorate the function directly since it is in a C module)
470 # (we cannot decorate the function directly since it is in a C module)
470 parse_dirstate = util.nogc(parsers.parse_dirstate)
471 parse_dirstate = util.nogc(parsers.parse_dirstate)
471 p = parse_dirstate(self._map, self.copymap, st)
472 p = parse_dirstate(self._map, self.copymap, st)
472 if not self._dirtyparents:
473 if not self._dirtyparents:
473 self.setparents(*p)
474 self.setparents(*p)
474
475
475 # Avoid excess attribute lookups by fast pathing certain checks
476 # Avoid excess attribute lookups by fast pathing certain checks
476 self.__contains__ = self._map.__contains__
477 self.__contains__ = self._map.__contains__
477 self.__getitem__ = self._map.__getitem__
478 self.__getitem__ = self._map.__getitem__
478 self.get = self._map.get
479 self.get = self._map.get
479
480
480 def write(self, _tr, st, now):
481 def write(self, _tr, st, now):
481 st.write(
482 st.write(
482 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
483 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
483 )
484 )
484 st.close()
485 st.close()
485 self._dirtyparents = False
486 self._dirtyparents = False
486 self.nonnormalset, self.otherparentset = self.nonnormalentries()
487 self.nonnormalset, self.otherparentset = self.nonnormalentries()
487
488
488 @propertycache
489 @propertycache
489 def nonnormalset(self):
490 def nonnormalset(self):
490 nonnorm, otherparents = self.nonnormalentries()
491 nonnorm, otherparents = self.nonnormalentries()
491 self.otherparentset = otherparents
492 self.otherparentset = otherparents
492 return nonnorm
493 return nonnorm
493
494
494 @propertycache
495 @propertycache
495 def otherparentset(self):
496 def otherparentset(self):
496 nonnorm, otherparents = self.nonnormalentries()
497 nonnorm, otherparents = self.nonnormalentries()
497 self.nonnormalset = nonnorm
498 self.nonnormalset = nonnorm
498 return otherparents
499 return otherparents
499
500
500 def non_normal_or_other_parent_paths(self):
501 def non_normal_or_other_parent_paths(self):
501 return self.nonnormalset.union(self.otherparentset)
502 return self.nonnormalset.union(self.otherparentset)
502
503
503 @propertycache
504 @propertycache
504 def identity(self):
505 def identity(self):
505 self._map
506 self._map
506 return self.identity
507 return self.identity
507
508
508 @propertycache
509 @propertycache
509 def dirfoldmap(self):
510 def dirfoldmap(self):
510 f = {}
511 f = {}
511 normcase = util.normcase
512 normcase = util.normcase
512 for name in self._dirs:
513 for name in self._dirs:
513 f[normcase(name)] = name
514 f[normcase(name)] = name
514 return f
515 return f
515
516
516
517
517 if rustmod is not None:
518 if rustmod is not None:
518
519
519 class dirstatemap(object):
520 class dirstatemap(object):
520 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
521 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
521 self._use_dirstate_v2 = use_dirstate_v2
522 self._use_dirstate_v2 = use_dirstate_v2
522 self._nodeconstants = nodeconstants
523 self._nodeconstants = nodeconstants
523 self._ui = ui
524 self._ui = ui
524 self._opener = opener
525 self._opener = opener
525 self._root = root
526 self._root = root
526 self._filename = b'dirstate'
527 self._filename = b'dirstate'
527 self._nodelen = 20 # Also update Rust code when changing this!
528 self._nodelen = 20 # Also update Rust code when changing this!
528 self._parents = None
529 self._parents = None
529 self._dirtyparents = False
530 self._dirtyparents = False
530 self._docket = None
531 self._docket = None
531
532
532 # for consistent view between _pl() and _read() invocations
533 # for consistent view between _pl() and _read() invocations
533 self._pendingmode = None
534 self._pendingmode = None
534
535
535 self._use_dirstate_tree = self._ui.configbool(
536 self._use_dirstate_tree = self._ui.configbool(
536 b"experimental",
537 b"experimental",
537 b"dirstate-tree.in-memory",
538 b"dirstate-tree.in-memory",
538 False,
539 False,
539 )
540 )
540
541
        def addfile(
            self,
            f,
            mode=0,
            size=None,
            mtime=None,
            added=False,
            merged=False,
            from_p2=False,
            possibly_dirty=False,
        ):
            """Add (or update) entry ``f`` in the map.

            All arguments are forwarded positionally to the Rust binding;
            keep the order in sync with the Rust-side signature.
            """
            return self._rustmap.addfile(
                f,
                mode,
                size,
                mtime,
                added,
                merged,
                from_p2,
                possibly_dirty,
            )
562
563
        def reset_state(
            self,
            filename,
            wc_tracked,
            p1_tracked,
            p2_tracked=False,
            merged=False,
            clean_p1=False,
            clean_p2=False,
            possibly_dirty=False,
            parentfiledata=None,
        ):
            """Set a entry to a given state, disregarding all previous state

            This is to be used by the part of the dirstate API dedicated to
            adjusting the dirstate after a update/merge.

            note: calling this might result to no entry existing at all if the
            dirstate map does not see any point at having one for this file
            anymore.
            """
            # `merged` implies the file differs between both parents, so a
            # "clean in pX" flag contradicts it.
            if merged and (clean_p1 or clean_p2):
                msg = (
                    b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
                )
                raise error.ProgrammingError(msg)
            # copy information are now outdated
            # (maybe new information should be in directly passed to this function)
            self.copymap.pop(filename, None)

            if not (p1_tracked or p2_tracked or wc_tracked):
                # Tracked nowhere: drop the entry entirely.
                self.dropfile(filename)
            elif merged:
                # XXX might be merged and removed ?
                entry = self.get(filename)
                if entry is not None and entry.tracked:
                    # XXX mostly replicate dirstate.other parent. We should get
                    # the higher layer to pass us more reliable data where `merged`
                    # actually mean merged. Dropping the else clause will show
                    # failure in `test-graft.t`
                    self.addfile(filename, merged=True)
                else:
                    self.addfile(filename, from_p2=True)
            elif not (p1_tracked or p2_tracked) and wc_tracked:
                # Only in the working copy: a plain "added" file.
                self.addfile(
                    filename, added=True, possibly_dirty=possibly_dirty
                )
            elif (p1_tracked or p2_tracked) and not wc_tracked:
                # XXX might be merged and removed ?
                # Mark as removed ('r') directly through the v1 entry format.
                self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
                self.nonnormalset.add(filename)
            elif clean_p2 and wc_tracked:
                if p1_tracked or self.get(filename) is not None:
                    # XXX the `self.get` call is catching some case in
                    # `test-merge-remove.t` where the file is tracked in p1, the
                    # p1_tracked argument is False.
                    #
                    # In addition, this seems to be a case where the file is marked
                    # as merged without actually being the result of a merge
                    # action. So thing are not ideal here.
                    self.addfile(filename, merged=True)
                else:
                    self.addfile(filename, from_p2=True)
            elif not p1_tracked and p2_tracked and wc_tracked:
                self.addfile(
                    filename, from_p2=True, possibly_dirty=possibly_dirty
                )
            elif possibly_dirty:
                self.addfile(filename, possibly_dirty=possibly_dirty)
            elif wc_tracked:
                # this is a "normal" file
                if parentfiledata is None:
                    msg = b'failed to pass parentfiledata for a normal file: %s'
                    msg %= filename
                    raise error.ProgrammingError(msg)
                mode, size, mtime = parentfiledata
                self.addfile(filename, mode=mode, size=size, mtime=mtime)
                self.nonnormalset.discard(filename)
            else:
                assert False, 'unreachable'
643
644
        def set_untracked(self, f):
            """Mark a file as no longer tracked in the dirstate map"""
            # `in_merge` only triggers more logic, so it is "fine" to always
            # pass it.
            #
            # the inner rust dirstate map code need to be adjusted once the API
            # for dirstate/dirstatemap/DirstateItem is a bit more settled
            self._rustmap.removefile(f, in_merge=True)
651
652
        def removefile(self, *args, **kwargs):
            """Mark a file as removed; straight pass-through to the Rust map."""
            return self._rustmap.removefile(*args, **kwargs)
654
655
655 def dropfile(self, *args, **kwargs):
656 def dropfile(self, f, *args, **kwargs):
656 return self._rustmap.dropfile(*args, **kwargs)
657 self._rustmap.copymap().pop(f, None)
658 return self._rustmap.dropfile(f, *args, **kwargs)
657
659
        def clearambiguoustimes(self, *args, **kwargs):
            # Delegate: invalidate mtimes that are ambiguous w.r.t. "now".
            return self._rustmap.clearambiguoustimes(*args, **kwargs)

        def nonnormalentries(self):
            # Delegate: entries not in "normal" state (plus other-parent set).
            return self._rustmap.nonnormalentries()

        def get(self, *args, **kwargs):
            # dict.get()-like lookup, delegated to the Rust map.
            return self._rustmap.get(*args, **kwargs)
666
668
        @property
        def copymap(self):
            # Copy map (destination -> source) exposed by the Rust map.
            # Plain property: fetched from Rust on every access, not cached.
            return self._rustmap.copymap()

        def directories(self):
            # Delegate: directory entries known to the Rust map.
            return self._rustmap.directories()

        def debug_iter(self):
            # Delegate: debug-oriented iteration over all entries.
            return self._rustmap.debug_iter()

        def preload(self):
            # Force the lazy `_rustmap` property to be populated now.
            self._rustmap
679
681
680 def clear(self):
682 def clear(self):
681 self._rustmap.clear()
683 self._rustmap.clear()
682 self.setparents(
684 self.setparents(
683 self._nodeconstants.nullid, self._nodeconstants.nullid
685 self._nodeconstants.nullid, self._nodeconstants.nullid
684 )
686 )
685 util.clearcachedproperty(self, b"_dirs")
687 util.clearcachedproperty(self, b"_dirs")
686 util.clearcachedproperty(self, b"_alldirs")
688 util.clearcachedproperty(self, b"_alldirs")
687 util.clearcachedproperty(self, b"dirfoldmap")
689 util.clearcachedproperty(self, b"dirfoldmap")
688
690
        # Mapping protocol: delegate straight to the underlying Rust map.

        def items(self):
            return self._rustmap.items()

        def keys(self):
            return iter(self._rustmap)

        def __contains__(self, key):
            return key in self._rustmap

        def __getitem__(self, item):
            return self._rustmap[item]

        def __len__(self):
            return len(self._rustmap)

        def __iter__(self):
            return iter(self._rustmap)

        # forward for python2,3 compat
        iteritems = items
709
711
        def _opendirstatefile(self):
            """Open the dirstate file, honoring a pending (in-transaction) copy.

            All reads during one session must come from the same variant
            (pending vs. committed); if the mode changed since the first
            open, abort rather than return an inconsistent view.
            """
            fp, mode = txnutil.trypending(
                self._root, self._opener, self._filename
            )
            if self._pendingmode is not None and self._pendingmode != mode:
                fp.close()
                raise error.Abort(
                    _(b'working directory state may be changed parallelly')
                )
            # Remember which variant we served so later opens can be checked.
            self._pendingmode = mode
            return fp
721
723
722 def _readdirstatefile(self, size=-1):
724 def _readdirstatefile(self, size=-1):
723 try:
725 try:
724 with self._opendirstatefile() as fp:
726 with self._opendirstatefile() as fp:
725 return fp.read(size)
727 return fp.read(size)
726 except IOError as err:
728 except IOError as err:
727 if err.errno != errno.ENOENT:
729 if err.errno != errno.ENOENT:
728 raise
730 raise
729 # File doesn't exist, so the current state is empty
731 # File doesn't exist, so the current state is empty
730 return b''
732 return b''
731
733
        def setparents(self, p1, p2):
            """Record new working-copy parents; persisted on the next write."""
            self._parents = (p1, p2)
            self._dirtyparents = True
735
737
736 def parents(self):
738 def parents(self):
737 if not self._parents:
739 if not self._parents:
738 if self._use_dirstate_v2:
740 if self._use_dirstate_v2:
739 self._parents = self.docket.parents
741 self._parents = self.docket.parents
740 else:
742 else:
741 read_len = self._nodelen * 2
743 read_len = self._nodelen * 2
742 st = self._readdirstatefile(read_len)
744 st = self._readdirstatefile(read_len)
743 l = len(st)
745 l = len(st)
744 if l == read_len:
746 if l == read_len:
745 self._parents = (
747 self._parents = (
746 st[: self._nodelen],
748 st[: self._nodelen],
747 st[self._nodelen : 2 * self._nodelen],
749 st[self._nodelen : 2 * self._nodelen],
748 )
750 )
749 elif l == 0:
751 elif l == 0:
750 self._parents = (
752 self._parents = (
751 self._nodeconstants.nullid,
753 self._nodeconstants.nullid,
752 self._nodeconstants.nullid,
754 self._nodeconstants.nullid,
753 )
755 )
754 else:
756 else:
755 raise error.Abort(
757 raise error.Abort(
756 _(b'working directory state appears damaged!')
758 _(b'working directory state appears damaged!')
757 )
759 )
758
760
759 return self._parents
761 return self._parents
760
762
        @property
        def docket(self):
            """Parsed dirstate-v2 docket, read lazily from the dirstate file.

            Raises ProgrammingError when accessed in v1 mode, where no docket
            exists.
            """
            if not self._docket:
                if not self._use_dirstate_v2:
                    raise error.ProgrammingError(
                        b'dirstate only has a docket in v2 format'
                    )
                self._docket = docketmod.DirstateDocket.parse(
                    self._readdirstatefile(), self._nodeconstants
                )
            return self._docket
772
774
        @propertycache
        def _rustmap(self):
            """
            Fills the Dirstatemap when called.
            """
            # ignore HG_PENDING because identity is used only for writing
            # Record the file's stat identity *before* reading so later writes
            # can detect a concurrent change.
            self.identity = util.filestat.frompath(
                self._opener.join(self._filename)
            )

            if self._use_dirstate_v2:
                if self.docket.uuid:
                    # TODO: use mmap when possible
                    data = self._opener.read(self.docket.data_filename())
                else:
                    # No data file yet: start from an empty payload.
                    data = b''
                self._rustmap = rustmod.DirstateMap.new_v2(
                    data, self.docket.data_size, self.docket.tree_metadata
                )
                parents = self.docket.parents
            else:
                # v1 parsing also yields the parents embedded in the file.
                self._rustmap, parents = rustmod.DirstateMap.new_v1(
                    self._use_dirstate_tree, self._readdirstatefile()
                )

            # Only adopt on-disk parents if no in-memory change is pending.
            if parents and not self._dirtyparents:
                self.setparents(*parents)

            # Shortcut attribute lookups to the Rust map.
            # NOTE(review): instance-level dunder assignments do not affect
            # the `in` / `[]` operators (special methods are looked up on the
            # type); only direct `obj.__contains__(...)`-style calls and
            # `self.get` benefit — confirm this is intentional.
            self.__contains__ = self._rustmap.__contains__
            self.__getitem__ = self._rustmap.__getitem__
            self.get = self._rustmap.get
            return self._rustmap
805
807
        def write(self, tr, st, now):
            """Serialize the map to disk.

            ``tr`` is the current transaction (may be None), ``st`` is the
            already-open dirstate file object (atomictemp), and ``now`` is
            the timestamp used to disambiguate mtimes.

            v1 writes everything into the dirstate file itself; v2 writes a
            small docket into the dirstate file and the bulk of the data into
            a separate data file, either appended to or replaced wholesale.
            """
            if not self._use_dirstate_v2:
                p1, p2 = self.parents()
                packed = self._rustmap.write_v1(p1, p2, now)
                st.write(packed)
                st.close()
                self._dirtyparents = False
                return

            # We can only append to an existing data file if there is one
            can_append = self.docket.uuid is not None
            packed, meta, append = self._rustmap.write_v2(now, can_append)
            if append:
                docket = self.docket
                data_filename = docket.data_filename()
                if tr:
                    tr.add(data_filename, docket.data_size)
                with self._opener(data_filename, b'r+b') as fp:
                    fp.seek(docket.data_size)
                    assert fp.tell() == docket.data_size
                    written = fp.write(packed)
                    if written is not None:  # py2 may return None
                        assert written == len(packed), (written, len(packed))
                docket.data_size += len(packed)
                docket.parents = self.parents()
                docket.tree_metadata = meta
                st.write(docket.serialize())
                st.close()
            else:
                # Full rewrite: a brand new data file under a new uuid.
                old_docket = self.docket
                new_docket = docketmod.DirstateDocket.with_new_uuid(
                    self.parents(), len(packed), meta
                )
                data_filename = new_docket.data_filename()
                if tr:
                    tr.add(data_filename, 0)
                self._opener.write(data_filename, packed)
                # Write the new docket after the new data file has been
                # written. Because `st` was opened with `atomictemp=True`,
                # the actual `.hg/dirstate` file is only affected on close.
                st.write(new_docket.serialize())
                st.close()
                # Remove the old data file after the new docket pointing to
                # the new data file was written.
                if old_docket.uuid:
                    data_filename = old_docket.data_filename()
                    unlink = lambda _tr=None: self._opener.unlink(data_filename)
                    if tr:
                        category = b"dirstate-v2-clean-" + old_docket.uuid
                        tr.addpostclose(category, unlink)
                    else:
                        unlink()
                self._docket = new_docket
            # Reload from the newly-written file
            util.clearcachedproperty(self, b"_rustmap")
            self._dirtyparents = False
862
864
        @propertycache
        def filefoldmap(self):
            """Returns a dictionary mapping normalized case paths to their
            non-normalized versions.
            """
            return self._rustmap.filefoldmapasdict()

        def hastrackeddir(self, d):
            # Delegate: directory-membership check against tracked files.
            return self._rustmap.hastrackeddir(d)

        def hasdir(self, d):
            # Delegate: directory-membership check against all entries.
            return self._rustmap.hasdir(d)

        @propertycache
        def identity(self):
            """Stat identity of the dirstate file, captured when first read.

            Accessing ``self._rustmap`` triggers the read, which stores the
            real filestat on the instance under this name, so the recursive
            lookup below returns the freshly cached value.
            """
            self._rustmap
            return self.identity
880
882
        @property
        def nonnormalset(self):
            # Plain property (not cached): files whose entry is not in
            # "normal" state, recomputed from the Rust map on each access.
            nonnorm = self._rustmap.non_normal_entries()
            return nonnorm

        @propertycache
        def otherparentset(self):
            # Cached: files whose content comes from the second parent.
            otherparents = self._rustmap.other_parent_entries()
            return otherparents

        def non_normal_or_other_parent_paths(self):
            # Delegate: union of the two sets above, computed Rust-side.
            return self._rustmap.non_normal_or_other_parent_paths()
893
895
894 @propertycache
896 @propertycache
895 def dirfoldmap(self):
897 def dirfoldmap(self):
896 f = {}
898 f = {}
897 normcase = util.normcase
899 normcase = util.normcase
898 for name in self._rustmap.tracked_dirs():
900 for name in self._rustmap.tracked_dirs():
899 f[normcase(name)] = name
901 f[normcase(name)] = name
900 return f
902 return f
901
903
        def set_possibly_dirty(self, filename):
            """record that the current state of the file on disk is unknown"""
            entry = self[filename]
            entry.set_possibly_dirty()
            # NOTE(review): the entry is written back explicitly, which
            # suggests __getitem__ hands out a copy whose mutation would
            # otherwise be lost — confirm against the Rust binding.
            self._rustmap.set_v1(filename, entry)
907
909
        def __setitem__(self, key, value):
            """Store ``value`` (a DirstateItem) under path ``key``."""
            # Only DirstateItem instances are valid map values.
            assert isinstance(value, DirstateItem)
            self._rustmap.set_v1(key, value)
General Comments 0
You need to be logged in to leave comments. Login now