##// END OF EJS Templates
dirstate: deprecate the `normal` method in all cases...
marmoute -
r48519:1168e54b default
parent child Browse files
Show More
@@ -1,1686 +1,1703 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
# Implementation modules resolved through Mercurial's policy layer; depending
# on the build these may be pure-Python or native (C / Rust) variants.
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# dirstate-v2 support requires the Rust extensions (rustmod is None otherwise)
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# short local aliases used throughout this module
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # Resolve fname relative to the .hg/ directory through the
        # dirstate's opener (set up in dirstate.__init__).
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # Resolve fname relative to the working-directory root
        # (dirstate._join prepends the root directory).
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator restricting ``func`` to a ``parentchange`` context.

    Calling the wrapped method while no parent change is pending raises
    a ProgrammingError.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator forbidding ``func`` inside a ``parentchange`` context.

    Calling the wrapped method while a parent change is pending raises
    a ProgrammingError.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state differs from what is on disk
        self._dirty = False
        # mtime of the most recently touched "normal" file; used to detect
        # same-timeslot modifications (see update_file_p1)
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of nested parentchange() contexts (see pendingparentchange)
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        # pre-change parents, recorded once by setparents, reset by invalidate
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139
139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # accessing ``_pl`` forces the dirstate map (and thus the parents)
        # to be loaded and cached
        self._pl
146
146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.

        Contexts may nest; ``_parentwriters`` tracks the nesting depth.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163
163
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.

        True whenever at least one ``parentchange()`` context is active.
        """
        return self._parentwriters > 0
169
169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # Assign the attribute explicitly (in addition to the propertycache
        # machinery) before returning it.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181
181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194
194
    @repocache(b'branch')
    def _branch(self):
        """Current branch name (bytes), read from .hg/branch.

        Falls back to b"default" when the file is missing or empty.
        """
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            # only a missing file is expected; re-raise other I/O errors
            if inst.errno != errno.ENOENT:
                raise
            return b"default"
203
203
    @property
    def _pl(self):
        """The (p1, p2) parent pair as stored in the dirstate map."""
        return self._map.parents()
207
207
    def hasdir(self, d):
        """Whether ``d`` is a directory with tracked content (delegates to
        ``self._map.hastrackeddir``)."""
        return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
    @propertycache
    def _slash(self):
        # True when paths should be shown with '/' separators: only if
        # ui.slash is set and the native separator is not already '/'.
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
    @propertycache
    def _checklink(self):
        # whether the filesystem at the repo root supports symlinks
        return util.checklink(self._root)
227
227
    @propertycache
    def _checkexec(self):
        # whether the filesystem at the repo root preserves the exec bit
        return bool(util.checkexec(self._root))
231
231
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed against '.hg')
        return not util.fscasesensitive(self._join(b'.hg'))
235
235
    def _join(self, f):
        """Return the absolute filesystem path for repo-relative ``f``."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240
240
    def flagfunc(self, buildfallback):
        """Return a function mapping a repo path to its flags.

        The returned callable yields b'l' for symlinks, b'x' for
        executables and b'' otherwise.  When the filesystem cannot
        represent symlinks and/or the exec bit, ``buildfallback()``
        supplies a function used to fill in the missing information.
        """
        if self._checklink and self._checkexec:
            # the filesystem expresses both properties: one lstat suffices

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks are real, but the exec bit comes from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # the exec bit is real, but symlinks come from the fallback

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither property is supported: the fallback answers everything
            return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
    def __contains__(self, key):
        """True when ``key`` has an entry in the dirstate map (any state)."""
        return key in self._map
338
338
    def __iter__(self):
        """Iterate over known filenames in sorted order."""
        return iter(sorted(self._map))
341
341
    def items(self):
        """Iterate over (filename, entry) pairs of the dirstate map."""
        return pycompat.iteritems(self._map)

    # Python 2 style alias kept for compatibility
    iteritems = items
346
346
    def directories(self):
        """Delegate to ``self._map.directories()``."""
        return self._map.directories()
349
349
    def parents(self):
        """Return the validated parent nodes as a list [p1, p2]."""
        return [self._validate(p) for p in self._pl]
352
352
    def p1(self):
        """Return the validated first parent node."""
        return self._validate(self._pl[0])
355
355
    def p2(self):
        """Return the validated second parent node."""
        return self._validate(self._pl[1])
358
358
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # a non-null second parent is only ever set while merging
        return self._pl[1] != self._nodeconstants.nullid
363
363
    def branch(self):
        """Return the current branch name in local encoding."""
        return encoding.tolocal(self._branch)
366
366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # record the pre-change parents once (reset in invalidate())
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # leaving a merge: entries marked merged/from-p2 no longer make
            # sense with a single parent and must be rewritten
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414
414
    def setbranch(self, branch):
        """Persist ``branch`` (given in local encoding) to .hg/branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        # atomictemp + checkambig: replace the file atomically and avoid
        # ambiguous mtimes confusing the filecache
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise
430
430
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so the next access reloads them
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        # reset all in-memory bookkeeping to a pristine state
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
    def copied(self, file):
        """Return the copy source recorded for ``file``, or None."""
        return self._map.copymap.get(file, None)
461
461
    def copies(self):
        """Return the map of copy destinations to their sources."""
        return self._map.copymap
464
464
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True the file was previously untracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            # previously unknown to the dirstate: add it
            self._add(filename)
            return True
        elif not entry.tracked:
            # known but currently untracked: re-enable tracking
            self.normallookup(filename)
            return True
        # already tracked: nothing to do
        return False
482
482
483 @requires_no_parents_change
483 @requires_no_parents_change
484 def set_untracked(self, filename):
484 def set_untracked(self, filename):
485 """a "public" method for generic code to mark a file as untracked
485 """a "public" method for generic code to mark a file as untracked
486
486
487 This function is to be called outside of "update/merge" case. For
487 This function is to be called outside of "update/merge" case. For
488 example by a command like `hg remove X`.
488 example by a command like `hg remove X`.
489
489
490 return True the file was previously tracked, False otherwise.
490 return True the file was previously tracked, False otherwise.
491 """
491 """
492 entry = self._map.get(filename)
492 entry = self._map.get(filename)
493 if entry is None:
493 if entry is None:
494 return False
494 return False
495 elif entry.added:
495 elif entry.added:
496 self._drop(filename)
496 self._drop(filename)
497 return True
497 return True
498 else:
498 else:
499 self._remove(filename)
499 self._remove(filename)
500 return True
500 return True
501
501
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        self._updatedfiles.add(filename)
        # delegate to the internal `_normal` (the public `normal` method is
        # being deprecated)
        self._normal(filename, parentfiledata=parentfiledata)
508
508
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            pass
        else:
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
570
570
571 @requires_parents_change
571 @requires_parents_change
572 def update_file(
572 def update_file(
573 self,
573 self,
574 filename,
574 filename,
575 wc_tracked,
575 wc_tracked,
576 p1_tracked,
576 p1_tracked,
577 p2_tracked=False,
577 p2_tracked=False,
578 merged=False,
578 merged=False,
579 clean_p1=False,
579 clean_p1=False,
580 clean_p2=False,
580 clean_p2=False,
581 possibly_dirty=False,
581 possibly_dirty=False,
582 parentfiledata=None,
582 parentfiledata=None,
583 ):
583 ):
584 """update the information about a file in the dirstate
584 """update the information about a file in the dirstate
585
585
586 This is to be called when the direstates parent changes to keep track
586 This is to be called when the direstates parent changes to keep track
587 of what is the file situation in regards to the working copy and its parent.
587 of what is the file situation in regards to the working copy and its parent.
588
588
589 This function must be called within a `dirstate.parentchange` context.
589 This function must be called within a `dirstate.parentchange` context.
590
590
591 note: the API is at an early stage and we might need to ajust it
591 note: the API is at an early stage and we might need to ajust it
592 depending of what information ends up being relevant and useful to
592 depending of what information ends up being relevant and useful to
593 other processing.
593 other processing.
594 """
594 """
595 if merged and (clean_p1 or clean_p2):
595 if merged and (clean_p1 or clean_p2):
596 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
596 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
597 raise error.ProgrammingError(msg)
597 raise error.ProgrammingError(msg)
598
598
599 # note: I do not think we need to double check name clash here since we
599 # note: I do not think we need to double check name clash here since we
600 # are in a update/merge case that should already have taken care of
600 # are in a update/merge case that should already have taken care of
601 # this. The test agrees
601 # this. The test agrees
602
602
603 self._dirty = True
603 self._dirty = True
604 self._updatedfiles.add(filename)
604 self._updatedfiles.add(filename)
605
605
606 need_parent_file_data = (
606 need_parent_file_data = (
607 not (possibly_dirty or clean_p2 or merged)
607 not (possibly_dirty or clean_p2 or merged)
608 and wc_tracked
608 and wc_tracked
609 and p1_tracked
609 and p1_tracked
610 )
610 )
611
611
612 # this mean we are doing call for file we do not really care about the
612 # this mean we are doing call for file we do not really care about the
613 # data (eg: added or removed), however this should be a minor overhead
613 # data (eg: added or removed), however this should be a minor overhead
614 # compared to the overall update process calling this.
614 # compared to the overall update process calling this.
615 if need_parent_file_data:
615 if need_parent_file_data:
616 if parentfiledata is None:
616 if parentfiledata is None:
617 parentfiledata = self._get_filedata(filename)
617 parentfiledata = self._get_filedata(filename)
618 mtime = parentfiledata[2]
618 mtime = parentfiledata[2]
619
619
620 if mtime > self._lastnormaltime:
620 if mtime > self._lastnormaltime:
621 # Remember the most recent modification timeslot for
621 # Remember the most recent modification timeslot for
622 # status(), to make sure we won't miss future
622 # status(), to make sure we won't miss future
623 # size-preserving file content modifications that happen
623 # size-preserving file content modifications that happen
624 # within the same timeslot.
624 # within the same timeslot.
625 self._lastnormaltime = mtime
625 self._lastnormaltime = mtime
626
626
627 self._map.reset_state(
627 self._map.reset_state(
628 filename,
628 filename,
629 wc_tracked,
629 wc_tracked,
630 p1_tracked,
630 p1_tracked,
631 p2_tracked=p2_tracked,
631 p2_tracked=p2_tracked,
632 merged=merged,
632 merged=merged,
633 clean_p1=clean_p1,
633 clean_p1=clean_p1,
634 clean_p2=clean_p2,
634 clean_p2=clean_p2,
635 possibly_dirty=possibly_dirty,
635 possibly_dirty=possibly_dirty,
636 parentfiledata=parentfiledata,
636 parentfiledata=parentfiledata,
637 )
637 )
638 if (
638 if (
639 parentfiledata is not None
639 parentfiledata is not None
640 and parentfiledata[2] > self._lastnormaltime
640 and parentfiledata[2] > self._lastnormaltime
641 ):
641 ):
642 # Remember the most recent modification timeslot for status(),
642 # Remember the most recent modification timeslot for status(),
643 # to make sure we won't miss future size-preserving file content
643 # to make sure we won't miss future size-preserving file content
644 # modifications that happen within the same timeslot.
644 # modifications that happen within the same timeslot.
645 self._lastnormaltime = parentfiledata[2]
645 self._lastnormaltime = parentfiledata[2]
646
646
647 def _addpath(
647 def _addpath(
648 self,
648 self,
649 f,
649 f,
650 mode=0,
650 mode=0,
651 size=None,
651 size=None,
652 mtime=None,
652 mtime=None,
653 added=False,
653 added=False,
654 merged=False,
654 merged=False,
655 from_p2=False,
655 from_p2=False,
656 possibly_dirty=False,
656 possibly_dirty=False,
657 ):
657 ):
658 entry = self._map.get(f)
658 entry = self._map.get(f)
659 if added or entry is not None and entry.removed:
659 if added or entry is not None and entry.removed:
660 scmutil.checkfilename(f)
660 scmutil.checkfilename(f)
661 if self._map.hastrackeddir(f):
661 if self._map.hastrackeddir(f):
662 msg = _(b'directory %r already in dirstate')
662 msg = _(b'directory %r already in dirstate')
663 msg %= pycompat.bytestr(f)
663 msg %= pycompat.bytestr(f)
664 raise error.Abort(msg)
664 raise error.Abort(msg)
665 # shadows
665 # shadows
666 for d in pathutil.finddirs(f):
666 for d in pathutil.finddirs(f):
667 if self._map.hastrackeddir(d):
667 if self._map.hastrackeddir(d):
668 break
668 break
669 entry = self._map.get(d)
669 entry = self._map.get(d)
670 if entry is not None and not entry.removed:
670 if entry is not None and not entry.removed:
671 msg = _(b'file %r in dirstate clashes with %r')
671 msg = _(b'file %r in dirstate clashes with %r')
672 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
672 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
673 raise error.Abort(msg)
673 raise error.Abort(msg)
674 self._dirty = True
674 self._dirty = True
675 self._updatedfiles.add(f)
675 self._updatedfiles.add(f)
676 self._map.addfile(
676 self._map.addfile(
677 f,
677 f,
678 mode=mode,
678 mode=mode,
679 size=size,
679 size=size,
680 mtime=mtime,
680 mtime=mtime,
681 added=added,
681 added=added,
682 merged=merged,
682 merged=merged,
683 from_p2=from_p2,
683 from_p2=from_p2,
684 possibly_dirty=possibly_dirty,
684 possibly_dirty=possibly_dirty,
685 )
685 )
686
686
687 def _get_filedata(self, filename):
687 def _get_filedata(self, filename):
688 """returns"""
688 """returns"""
689 s = os.lstat(self._join(filename))
689 s = os.lstat(self._join(filename))
690 mode = s.st_mode
690 mode = s.st_mode
691 size = s.st_size
691 size = s.st_size
692 mtime = s[stat.ST_MTIME]
692 mtime = s[stat.ST_MTIME]
693 return (mode, size, mtime)
693 return (mode, size, mtime)
694
694
695 def normal(self, f, parentfiledata=None):
695 def normal(self, f, parentfiledata=None):
696 """Mark a file normal and clean.
696 """Mark a file normal and clean.
697
697
698 parentfiledata: (mode, size, mtime) of the clean file
698 parentfiledata: (mode, size, mtime) of the clean file
699
699
700 parentfiledata should be computed from memory (for mode,
700 parentfiledata should be computed from memory (for mode,
701 size), as or close as possible from the point where we
701 size), as or close as possible from the point where we
702 determined the file was clean, to limit the risk of the
702 determined the file was clean, to limit the risk of the
703 file having been changed by an external process between the
703 file having been changed by an external process between the
704 moment where the file was determined to be clean and now."""
704 moment where the file was determined to be clean and now."""
705 if self.pendingparentchange():
706 util.nouideprecwarn(
707 b"do not use `normal` inside of update/merge context."
708 b" Use `update_file` or `update_file_p1`",
709 b'6.0',
710 stacklevel=2,
711 )
712 else:
713 util.nouideprecwarn(
714 b"do not use `normal` outside of update/merge context."
715 b" Use `set_tracked`",
716 b'6.0',
717 stacklevel=2,
718 )
719 self._normal(f, parentfiledata=parentfiledata)
720
721 def _normal(self, f, parentfiledata=None):
705 if parentfiledata:
722 if parentfiledata:
706 (mode, size, mtime) = parentfiledata
723 (mode, size, mtime) = parentfiledata
707 else:
724 else:
708 (mode, size, mtime) = self._get_filedata(f)
725 (mode, size, mtime) = self._get_filedata(f)
709 self._addpath(f, mode=mode, size=size, mtime=mtime)
726 self._addpath(f, mode=mode, size=size, mtime=mtime)
710 self._map.copymap.pop(f, None)
727 self._map.copymap.pop(f, None)
711 if f in self._map.nonnormalset:
728 if f in self._map.nonnormalset:
712 self._map.nonnormalset.remove(f)
729 self._map.nonnormalset.remove(f)
713 if mtime > self._lastnormaltime:
730 if mtime > self._lastnormaltime:
714 # Remember the most recent modification timeslot for status(),
731 # Remember the most recent modification timeslot for status(),
715 # to make sure we won't miss future size-preserving file content
732 # to make sure we won't miss future size-preserving file content
716 # modifications that happen within the same timeslot.
733 # modifications that happen within the same timeslot.
717 self._lastnormaltime = mtime
734 self._lastnormaltime = mtime
718
735
719 def normallookup(self, f):
736 def normallookup(self, f):
720 '''Mark a file normal, but possibly dirty.'''
737 '''Mark a file normal, but possibly dirty.'''
721 if self.in_merge:
738 if self.in_merge:
722 # if there is a merge going on and the file was either
739 # if there is a merge going on and the file was either
723 # "merged" or coming from other parent (-2) before
740 # "merged" or coming from other parent (-2) before
724 # being removed, restore that state.
741 # being removed, restore that state.
725 entry = self._map.get(f)
742 entry = self._map.get(f)
726 if entry is not None:
743 if entry is not None:
727 # XXX this should probably be dealt with a a lower level
744 # XXX this should probably be dealt with a a lower level
728 # (see `merged_removed` and `from_p2_removed`)
745 # (see `merged_removed` and `from_p2_removed`)
729 if entry.merged_removed or entry.from_p2_removed:
746 if entry.merged_removed or entry.from_p2_removed:
730 source = self._map.copymap.get(f)
747 source = self._map.copymap.get(f)
731 if entry.merged_removed:
748 if entry.merged_removed:
732 self.merge(f)
749 self.merge(f)
733 elif entry.from_p2_removed:
750 elif entry.from_p2_removed:
734 self.otherparent(f)
751 self.otherparent(f)
735 if source is not None:
752 if source is not None:
736 self.copy(source, f)
753 self.copy(source, f)
737 return
754 return
738 elif entry.merged or entry.from_p2:
755 elif entry.merged or entry.from_p2:
739 return
756 return
740 self._addpath(f, possibly_dirty=True)
757 self._addpath(f, possibly_dirty=True)
741 self._map.copymap.pop(f, None)
758 self._map.copymap.pop(f, None)
742
759
743 def otherparent(self, f):
760 def otherparent(self, f):
744 '''Mark as coming from the other parent, always dirty.'''
761 '''Mark as coming from the other parent, always dirty.'''
745 if not self.in_merge:
762 if not self.in_merge:
746 msg = _(b"setting %r to other parent only allowed in merges") % f
763 msg = _(b"setting %r to other parent only allowed in merges") % f
747 raise error.Abort(msg)
764 raise error.Abort(msg)
748 entry = self._map.get(f)
765 entry = self._map.get(f)
749 if entry is not None and entry.tracked:
766 if entry is not None and entry.tracked:
750 # merge-like
767 # merge-like
751 self._addpath(f, merged=True)
768 self._addpath(f, merged=True)
752 else:
769 else:
753 # add-like
770 # add-like
754 self._addpath(f, from_p2=True)
771 self._addpath(f, from_p2=True)
755 self._map.copymap.pop(f, None)
772 self._map.copymap.pop(f, None)
756
773
757 def add(self, f):
774 def add(self, f):
758 '''Mark a file added.'''
775 '''Mark a file added.'''
759 if not self.pendingparentchange():
776 if not self.pendingparentchange():
760 util.nouideprecwarn(
777 util.nouideprecwarn(
761 b"do not use `add` outside of update/merge context."
778 b"do not use `add` outside of update/merge context."
762 b" Use `set_tracked`",
779 b" Use `set_tracked`",
763 b'6.0',
780 b'6.0',
764 stacklevel=2,
781 stacklevel=2,
765 )
782 )
766 self._add(f)
783 self._add(f)
767
784
768 def _add(self, filename):
785 def _add(self, filename):
769 """internal function to mark a file as added"""
786 """internal function to mark a file as added"""
770 self._addpath(filename, added=True)
787 self._addpath(filename, added=True)
771 self._map.copymap.pop(filename, None)
788 self._map.copymap.pop(filename, None)
772
789
773 def remove(self, f):
790 def remove(self, f):
774 '''Mark a file removed'''
791 '''Mark a file removed'''
775 if self.pendingparentchange():
792 if self.pendingparentchange():
776 util.nouideprecwarn(
793 util.nouideprecwarn(
777 b"do not use `remove` insde of update/merge context."
794 b"do not use `remove` insde of update/merge context."
778 b" Use `update_file` or `update_file_p1`",
795 b" Use `update_file` or `update_file_p1`",
779 b'6.0',
796 b'6.0',
780 stacklevel=2,
797 stacklevel=2,
781 )
798 )
782 else:
799 else:
783 util.nouideprecwarn(
800 util.nouideprecwarn(
784 b"do not use `remove` outside of update/merge context."
801 b"do not use `remove` outside of update/merge context."
785 b" Use `set_untracked`",
802 b" Use `set_untracked`",
786 b'6.0',
803 b'6.0',
787 stacklevel=2,
804 stacklevel=2,
788 )
805 )
789 self._remove(f)
806 self._remove(f)
790
807
791 def _remove(self, filename):
808 def _remove(self, filename):
792 """internal function to mark a file removed"""
809 """internal function to mark a file removed"""
793 self._dirty = True
810 self._dirty = True
794 self._updatedfiles.add(filename)
811 self._updatedfiles.add(filename)
795 self._map.removefile(filename, in_merge=self.in_merge)
812 self._map.removefile(filename, in_merge=self.in_merge)
796
813
797 def merge(self, f):
814 def merge(self, f):
798 '''Mark a file merged.'''
815 '''Mark a file merged.'''
799 if not self.in_merge:
816 if not self.in_merge:
800 return self.normallookup(f)
817 return self.normallookup(f)
801 return self.otherparent(f)
818 return self.otherparent(f)
802
819
803 def drop(self, f):
820 def drop(self, f):
804 '''Drop a file from the dirstate'''
821 '''Drop a file from the dirstate'''
805 if not self.pendingparentchange():
822 if not self.pendingparentchange():
806 util.nouideprecwarn(
823 util.nouideprecwarn(
807 b"do not use `drop` outside of update/merge context."
824 b"do not use `drop` outside of update/merge context."
808 b" Use `set_untracked`",
825 b" Use `set_untracked`",
809 b'6.0',
826 b'6.0',
810 stacklevel=2,
827 stacklevel=2,
811 )
828 )
812 self._drop(f)
829 self._drop(f)
813
830
814 def _drop(self, filename):
831 def _drop(self, filename):
815 """internal function to drop a file from the dirstate"""
832 """internal function to drop a file from the dirstate"""
816 if self._map.dropfile(filename):
833 if self._map.dropfile(filename):
817 self._dirty = True
834 self._dirty = True
818 self._updatedfiles.add(filename)
835 self._updatedfiles.add(filename)
819 self._map.copymap.pop(filename, None)
836 self._map.copymap.pop(filename, None)
820
837
821 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
838 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
822 if exists is None:
839 if exists is None:
823 exists = os.path.lexists(os.path.join(self._root, path))
840 exists = os.path.lexists(os.path.join(self._root, path))
824 if not exists:
841 if not exists:
825 # Maybe a path component exists
842 # Maybe a path component exists
826 if not ignoremissing and b'/' in path:
843 if not ignoremissing and b'/' in path:
827 d, f = path.rsplit(b'/', 1)
844 d, f = path.rsplit(b'/', 1)
828 d = self._normalize(d, False, ignoremissing, None)
845 d = self._normalize(d, False, ignoremissing, None)
829 folded = d + b"/" + f
846 folded = d + b"/" + f
830 else:
847 else:
831 # No path components, preserve original case
848 # No path components, preserve original case
832 folded = path
849 folded = path
833 else:
850 else:
834 # recursively normalize leading directory components
851 # recursively normalize leading directory components
835 # against dirstate
852 # against dirstate
836 if b'/' in normed:
853 if b'/' in normed:
837 d, f = normed.rsplit(b'/', 1)
854 d, f = normed.rsplit(b'/', 1)
838 d = self._normalize(d, False, ignoremissing, True)
855 d = self._normalize(d, False, ignoremissing, True)
839 r = self._root + b"/" + d
856 r = self._root + b"/" + d
840 folded = d + b"/" + util.fspath(f, r)
857 folded = d + b"/" + util.fspath(f, r)
841 else:
858 else:
842 folded = util.fspath(normed, self._root)
859 folded = util.fspath(normed, self._root)
843 storemap[normed] = folded
860 storemap[normed] = folded
844
861
845 return folded
862 return folded
846
863
847 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
864 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
848 normed = util.normcase(path)
865 normed = util.normcase(path)
849 folded = self._map.filefoldmap.get(normed, None)
866 folded = self._map.filefoldmap.get(normed, None)
850 if folded is None:
867 if folded is None:
851 if isknown:
868 if isknown:
852 folded = path
869 folded = path
853 else:
870 else:
854 folded = self._discoverpath(
871 folded = self._discoverpath(
855 path, normed, ignoremissing, exists, self._map.filefoldmap
872 path, normed, ignoremissing, exists, self._map.filefoldmap
856 )
873 )
857 return folded
874 return folded
858
875
859 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
876 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
860 normed = util.normcase(path)
877 normed = util.normcase(path)
861 folded = self._map.filefoldmap.get(normed, None)
878 folded = self._map.filefoldmap.get(normed, None)
862 if folded is None:
879 if folded is None:
863 folded = self._map.dirfoldmap.get(normed, None)
880 folded = self._map.dirfoldmap.get(normed, None)
864 if folded is None:
881 if folded is None:
865 if isknown:
882 if isknown:
866 folded = path
883 folded = path
867 else:
884 else:
868 # store discovered result in dirfoldmap so that future
885 # store discovered result in dirfoldmap so that future
869 # normalizefile calls don't start matching directories
886 # normalizefile calls don't start matching directories
870 folded = self._discoverpath(
887 folded = self._discoverpath(
871 path, normed, ignoremissing, exists, self._map.dirfoldmap
888 path, normed, ignoremissing, exists, self._map.dirfoldmap
872 )
889 )
873 return folded
890 return folded
874
891
875 def normalize(self, path, isknown=False, ignoremissing=False):
892 def normalize(self, path, isknown=False, ignoremissing=False):
876 """
893 """
877 normalize the case of a pathname when on a casefolding filesystem
894 normalize the case of a pathname when on a casefolding filesystem
878
895
879 isknown specifies whether the filename came from walking the
896 isknown specifies whether the filename came from walking the
880 disk, to avoid extra filesystem access.
897 disk, to avoid extra filesystem access.
881
898
882 If ignoremissing is True, missing path are returned
899 If ignoremissing is True, missing path are returned
883 unchanged. Otherwise, we try harder to normalize possibly
900 unchanged. Otherwise, we try harder to normalize possibly
884 existing path components.
901 existing path components.
885
902
886 The normalized case is determined based on the following precedence:
903 The normalized case is determined based on the following precedence:
887
904
888 - version of name already stored in the dirstate
905 - version of name already stored in the dirstate
889 - version of name stored on disk
906 - version of name stored on disk
890 - version provided via command arguments
907 - version provided via command arguments
891 """
908 """
892
909
893 if self._checkcase:
910 if self._checkcase:
894 return self._normalize(path, isknown, ignoremissing)
911 return self._normalize(path, isknown, ignoremissing)
895 return path
912 return path
896
913
897 def clear(self):
914 def clear(self):
898 self._map.clear()
915 self._map.clear()
899 self._lastnormaltime = 0
916 self._lastnormaltime = 0
900 self._updatedfiles.clear()
917 self._updatedfiles.clear()
901 self._dirty = True
918 self._dirty = True
902
919
903 def rebuild(self, parent, allfiles, changedfiles=None):
920 def rebuild(self, parent, allfiles, changedfiles=None):
904 if changedfiles is None:
921 if changedfiles is None:
905 # Rebuild entire dirstate
922 # Rebuild entire dirstate
906 to_lookup = allfiles
923 to_lookup = allfiles
907 to_drop = []
924 to_drop = []
908 lastnormaltime = self._lastnormaltime
925 lastnormaltime = self._lastnormaltime
909 self.clear()
926 self.clear()
910 self._lastnormaltime = lastnormaltime
927 self._lastnormaltime = lastnormaltime
911 elif len(changedfiles) < 10:
928 elif len(changedfiles) < 10:
912 # Avoid turning allfiles into a set, which can be expensive if it's
929 # Avoid turning allfiles into a set, which can be expensive if it's
913 # large.
930 # large.
914 to_lookup = []
931 to_lookup = []
915 to_drop = []
932 to_drop = []
916 for f in changedfiles:
933 for f in changedfiles:
917 if f in allfiles:
934 if f in allfiles:
918 to_lookup.append(f)
935 to_lookup.append(f)
919 else:
936 else:
920 to_drop.append(f)
937 to_drop.append(f)
921 else:
938 else:
922 changedfilesset = set(changedfiles)
939 changedfilesset = set(changedfiles)
923 to_lookup = changedfilesset & set(allfiles)
940 to_lookup = changedfilesset & set(allfiles)
924 to_drop = changedfilesset - to_lookup
941 to_drop = changedfilesset - to_lookup
925
942
926 if self._origpl is None:
943 if self._origpl is None:
927 self._origpl = self._pl
944 self._origpl = self._pl
928 self._map.setparents(parent, self._nodeconstants.nullid)
945 self._map.setparents(parent, self._nodeconstants.nullid)
929
946
930 for f in to_lookup:
947 for f in to_lookup:
931 self.normallookup(f)
948 self.normallookup(f)
932 for f in to_drop:
949 for f in to_drop:
933 self._drop(f)
950 self._drop(f)
934
951
935 self._dirty = True
952 self._dirty = True
936
953
937 def identity(self):
954 def identity(self):
938 """Return identity of dirstate itself to detect changing in storage
955 """Return identity of dirstate itself to detect changing in storage
939
956
940 If identity of previous dirstate is equal to this, writing
957 If identity of previous dirstate is equal to this, writing
941 changes based on the former dirstate out can keep consistency.
958 changes based on the former dirstate out can keep consistency.
942 """
959 """
943 return self._map.identity
960 return self._map.identity
944
961
945 def write(self, tr):
962 def write(self, tr):
946 if not self._dirty:
963 if not self._dirty:
947 return
964 return
948
965
949 filename = self._filename
966 filename = self._filename
950 if tr:
967 if tr:
951 # 'dirstate.write()' is not only for writing in-memory
968 # 'dirstate.write()' is not only for writing in-memory
952 # changes out, but also for dropping ambiguous timestamp.
969 # changes out, but also for dropping ambiguous timestamp.
953 # delayed writing re-raise "ambiguous timestamp issue".
970 # delayed writing re-raise "ambiguous timestamp issue".
954 # See also the wiki page below for detail:
971 # See also the wiki page below for detail:
955 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
972 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
956
973
957 # emulate dropping timestamp in 'parsers.pack_dirstate'
974 # emulate dropping timestamp in 'parsers.pack_dirstate'
958 now = _getfsnow(self._opener)
975 now = _getfsnow(self._opener)
959 self._map.clearambiguoustimes(self._updatedfiles, now)
976 self._map.clearambiguoustimes(self._updatedfiles, now)
960
977
961 # emulate that all 'dirstate.normal' results are written out
978 # emulate that all 'dirstate.normal' results are written out
962 self._lastnormaltime = 0
979 self._lastnormaltime = 0
963 self._updatedfiles.clear()
980 self._updatedfiles.clear()
964
981
965 # delay writing in-memory changes out
982 # delay writing in-memory changes out
966 tr.addfilegenerator(
983 tr.addfilegenerator(
967 b'dirstate',
984 b'dirstate',
968 (self._filename,),
985 (self._filename,),
969 lambda f: self._writedirstate(tr, f),
986 lambda f: self._writedirstate(tr, f),
970 location=b'plain',
987 location=b'plain',
971 )
988 )
972 return
989 return
973
990
974 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
991 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
975 self._writedirstate(tr, st)
992 self._writedirstate(tr, st)
976
993
977 def addparentchangecallback(self, category, callback):
994 def addparentchangecallback(self, category, callback):
978 """add a callback to be called when the wd parents are changed
995 """add a callback to be called when the wd parents are changed
979
996
980 Callback will be called with the following arguments:
997 Callback will be called with the following arguments:
981 dirstate, (oldp1, oldp2), (newp1, newp2)
998 dirstate, (oldp1, oldp2), (newp1, newp2)
982
999
983 Category is a unique identifier to allow overwriting an old callback
1000 Category is a unique identifier to allow overwriting an old callback
984 with a newer callback.
1001 with a newer callback.
985 """
1002 """
986 self._plchangecallbacks[category] = callback
1003 self._plchangecallbacks[category] = callback
987
1004
988 def _writedirstate(self, tr, st):
1005 def _writedirstate(self, tr, st):
989 # notify callbacks about parents change
1006 # notify callbacks about parents change
990 if self._origpl is not None and self._origpl != self._pl:
1007 if self._origpl is not None and self._origpl != self._pl:
991 for c, callback in sorted(
1008 for c, callback in sorted(
992 pycompat.iteritems(self._plchangecallbacks)
1009 pycompat.iteritems(self._plchangecallbacks)
993 ):
1010 ):
994 callback(self, self._origpl, self._pl)
1011 callback(self, self._origpl, self._pl)
995 self._origpl = None
1012 self._origpl = None
996 # use the modification time of the newly created temporary file as the
1013 # use the modification time of the newly created temporary file as the
997 # filesystem's notion of 'now'
1014 # filesystem's notion of 'now'
998 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
1015 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
999
1016
1000 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1017 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1001 # timestamp of each entries in dirstate, because of 'now > mtime'
1018 # timestamp of each entries in dirstate, because of 'now > mtime'
1002 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
1019 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
1003 if delaywrite > 0:
1020 if delaywrite > 0:
1004 # do we have any files to delay for?
1021 # do we have any files to delay for?
1005 for f, e in pycompat.iteritems(self._map):
1022 for f, e in pycompat.iteritems(self._map):
1006 if e.need_delay(now):
1023 if e.need_delay(now):
1007 import time # to avoid useless import
1024 import time # to avoid useless import
1008
1025
1009 # rather than sleep n seconds, sleep until the next
1026 # rather than sleep n seconds, sleep until the next
1010 # multiple of n seconds
1027 # multiple of n seconds
1011 clock = time.time()
1028 clock = time.time()
1012 start = int(clock) - (int(clock) % delaywrite)
1029 start = int(clock) - (int(clock) % delaywrite)
1013 end = start + delaywrite
1030 end = start + delaywrite
1014 time.sleep(end - clock)
1031 time.sleep(end - clock)
1015 now = end # trust our estimate that the end is near now
1032 now = end # trust our estimate that the end is near now
1016 break
1033 break
1017
1034
1018 self._map.write(tr, st, now)
1035 self._map.write(tr, st, now)
1019 self._lastnormaltime = 0
1036 self._lastnormaltime = 0
1020 self._dirty = False
1037 self._dirty = False
1021
1038
1022 def _dirignore(self, f):
1039 def _dirignore(self, f):
1023 if self._ignore(f):
1040 if self._ignore(f):
1024 return True
1041 return True
1025 for p in pathutil.finddirs(f):
1042 for p in pathutil.finddirs(f):
1026 if self._ignore(p):
1043 if self._ignore(p):
1027 return True
1044 return True
1028 return False
1045 return False
1029
1046
1030 def _ignorefiles(self):
1047 def _ignorefiles(self):
1031 files = []
1048 files = []
1032 if os.path.exists(self._join(b'.hgignore')):
1049 if os.path.exists(self._join(b'.hgignore')):
1033 files.append(self._join(b'.hgignore'))
1050 files.append(self._join(b'.hgignore'))
1034 for name, path in self._ui.configitems(b"ui"):
1051 for name, path in self._ui.configitems(b"ui"):
1035 if name == b'ignore' or name.startswith(b'ignore.'):
1052 if name == b'ignore' or name.startswith(b'ignore.'):
1036 # we need to use os.path.join here rather than self._join
1053 # we need to use os.path.join here rather than self._join
1037 # because path is arbitrary and user-specified
1054 # because path is arbitrary and user-specified
1038 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1055 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1039 return files
1056 return files
1040
1057
1041 def _ignorefileandline(self, f):
1058 def _ignorefileandline(self, f):
1042 files = collections.deque(self._ignorefiles())
1059 files = collections.deque(self._ignorefiles())
1043 visited = set()
1060 visited = set()
1044 while files:
1061 while files:
1045 i = files.popleft()
1062 i = files.popleft()
1046 patterns = matchmod.readpatternfile(
1063 patterns = matchmod.readpatternfile(
1047 i, self._ui.warn, sourceinfo=True
1064 i, self._ui.warn, sourceinfo=True
1048 )
1065 )
1049 for pattern, lineno, line in patterns:
1066 for pattern, lineno, line in patterns:
1050 kind, p = matchmod._patsplit(pattern, b'glob')
1067 kind, p = matchmod._patsplit(pattern, b'glob')
1051 if kind == b"subinclude":
1068 if kind == b"subinclude":
1052 if p not in visited:
1069 if p not in visited:
1053 files.append(p)
1070 files.append(p)
1054 continue
1071 continue
1055 m = matchmod.match(
1072 m = matchmod.match(
1056 self._root, b'', [], [pattern], warn=self._ui.warn
1073 self._root, b'', [], [pattern], warn=self._ui.warn
1057 )
1074 )
1058 if m(f):
1075 if m(f):
1059 return (i, lineno, line)
1076 return (i, lineno, line)
1060 visited.add(i)
1077 visited.add(i)
1061 return (None, -1, b"")
1078 return (None, -1, b"")
1062
1079
1063 def _walkexplicit(self, match, subrepos):
1080 def _walkexplicit(self, match, subrepos):
1064 """Get stat data about the files explicitly specified by match.
1081 """Get stat data about the files explicitly specified by match.
1065
1082
1066 Return a triple (results, dirsfound, dirsnotfound).
1083 Return a triple (results, dirsfound, dirsnotfound).
1067 - results is a mapping from filename to stat result. It also contains
1084 - results is a mapping from filename to stat result. It also contains
1068 listings mapping subrepos and .hg to None.
1085 listings mapping subrepos and .hg to None.
1069 - dirsfound is a list of files found to be directories.
1086 - dirsfound is a list of files found to be directories.
1070 - dirsnotfound is a list of files that the dirstate thinks are
1087 - dirsnotfound is a list of files that the dirstate thinks are
1071 directories and that were not found."""
1088 directories and that were not found."""
1072
1089
1073 def badtype(mode):
1090 def badtype(mode):
1074 kind = _(b'unknown')
1091 kind = _(b'unknown')
1075 if stat.S_ISCHR(mode):
1092 if stat.S_ISCHR(mode):
1076 kind = _(b'character device')
1093 kind = _(b'character device')
1077 elif stat.S_ISBLK(mode):
1094 elif stat.S_ISBLK(mode):
1078 kind = _(b'block device')
1095 kind = _(b'block device')
1079 elif stat.S_ISFIFO(mode):
1096 elif stat.S_ISFIFO(mode):
1080 kind = _(b'fifo')
1097 kind = _(b'fifo')
1081 elif stat.S_ISSOCK(mode):
1098 elif stat.S_ISSOCK(mode):
1082 kind = _(b'socket')
1099 kind = _(b'socket')
1083 elif stat.S_ISDIR(mode):
1100 elif stat.S_ISDIR(mode):
1084 kind = _(b'directory')
1101 kind = _(b'directory')
1085 return _(b'unsupported file type (type is %s)') % kind
1102 return _(b'unsupported file type (type is %s)') % kind
1086
1103
1087 badfn = match.bad
1104 badfn = match.bad
1088 dmap = self._map
1105 dmap = self._map
1089 lstat = os.lstat
1106 lstat = os.lstat
1090 getkind = stat.S_IFMT
1107 getkind = stat.S_IFMT
1091 dirkind = stat.S_IFDIR
1108 dirkind = stat.S_IFDIR
1092 regkind = stat.S_IFREG
1109 regkind = stat.S_IFREG
1093 lnkkind = stat.S_IFLNK
1110 lnkkind = stat.S_IFLNK
1094 join = self._join
1111 join = self._join
1095 dirsfound = []
1112 dirsfound = []
1096 foundadd = dirsfound.append
1113 foundadd = dirsfound.append
1097 dirsnotfound = []
1114 dirsnotfound = []
1098 notfoundadd = dirsnotfound.append
1115 notfoundadd = dirsnotfound.append
1099
1116
1100 if not match.isexact() and self._checkcase:
1117 if not match.isexact() and self._checkcase:
1101 normalize = self._normalize
1118 normalize = self._normalize
1102 else:
1119 else:
1103 normalize = None
1120 normalize = None
1104
1121
1105 files = sorted(match.files())
1122 files = sorted(match.files())
1106 subrepos.sort()
1123 subrepos.sort()
1107 i, j = 0, 0
1124 i, j = 0, 0
1108 while i < len(files) and j < len(subrepos):
1125 while i < len(files) and j < len(subrepos):
1109 subpath = subrepos[j] + b"/"
1126 subpath = subrepos[j] + b"/"
1110 if files[i] < subpath:
1127 if files[i] < subpath:
1111 i += 1
1128 i += 1
1112 continue
1129 continue
1113 while i < len(files) and files[i].startswith(subpath):
1130 while i < len(files) and files[i].startswith(subpath):
1114 del files[i]
1131 del files[i]
1115 j += 1
1132 j += 1
1116
1133
1117 if not files or b'' in files:
1134 if not files or b'' in files:
1118 files = [b'']
1135 files = [b'']
1119 # constructing the foldmap is expensive, so don't do it for the
1136 # constructing the foldmap is expensive, so don't do it for the
1120 # common case where files is ['']
1137 # common case where files is ['']
1121 normalize = None
1138 normalize = None
1122 results = dict.fromkeys(subrepos)
1139 results = dict.fromkeys(subrepos)
1123 results[b'.hg'] = None
1140 results[b'.hg'] = None
1124
1141
1125 for ff in files:
1142 for ff in files:
1126 if normalize:
1143 if normalize:
1127 nf = normalize(ff, False, True)
1144 nf = normalize(ff, False, True)
1128 else:
1145 else:
1129 nf = ff
1146 nf = ff
1130 if nf in results:
1147 if nf in results:
1131 continue
1148 continue
1132
1149
1133 try:
1150 try:
1134 st = lstat(join(nf))
1151 st = lstat(join(nf))
1135 kind = getkind(st.st_mode)
1152 kind = getkind(st.st_mode)
1136 if kind == dirkind:
1153 if kind == dirkind:
1137 if nf in dmap:
1154 if nf in dmap:
1138 # file replaced by dir on disk but still in dirstate
1155 # file replaced by dir on disk but still in dirstate
1139 results[nf] = None
1156 results[nf] = None
1140 foundadd((nf, ff))
1157 foundadd((nf, ff))
1141 elif kind == regkind or kind == lnkkind:
1158 elif kind == regkind or kind == lnkkind:
1142 results[nf] = st
1159 results[nf] = st
1143 else:
1160 else:
1144 badfn(ff, badtype(kind))
1161 badfn(ff, badtype(kind))
1145 if nf in dmap:
1162 if nf in dmap:
1146 results[nf] = None
1163 results[nf] = None
1147 except OSError as inst: # nf not found on disk - it is dirstate only
1164 except OSError as inst: # nf not found on disk - it is dirstate only
1148 if nf in dmap: # does it exactly match a missing file?
1165 if nf in dmap: # does it exactly match a missing file?
1149 results[nf] = None
1166 results[nf] = None
1150 else: # does it match a missing directory?
1167 else: # does it match a missing directory?
1151 if self._map.hasdir(nf):
1168 if self._map.hasdir(nf):
1152 notfoundadd(nf)
1169 notfoundadd(nf)
1153 else:
1170 else:
1154 badfn(ff, encoding.strtolocal(inst.strerror))
1171 badfn(ff, encoding.strtolocal(inst.strerror))
1155
1172
1156 # match.files() may contain explicitly-specified paths that shouldn't
1173 # match.files() may contain explicitly-specified paths that shouldn't
1157 # be taken; drop them from the list of files found. dirsfound/notfound
1174 # be taken; drop them from the list of files found. dirsfound/notfound
1158 # aren't filtered here because they will be tested later.
1175 # aren't filtered here because they will be tested later.
1159 if match.anypats():
1176 if match.anypats():
1160 for f in list(results):
1177 for f in list(results):
1161 if f == b'.hg' or f in subrepos:
1178 if f == b'.hg' or f in subrepos:
1162 # keep sentinel to disable further out-of-repo walks
1179 # keep sentinel to disable further out-of-repo walks
1163 continue
1180 continue
1164 if not match(f):
1181 if not match(f):
1165 del results[f]
1182 del results[f]
1166
1183
1167 # Case insensitive filesystems cannot rely on lstat() failing to detect
1184 # Case insensitive filesystems cannot rely on lstat() failing to detect
1168 # a case-only rename. Prune the stat object for any file that does not
1185 # a case-only rename. Prune the stat object for any file that does not
1169 # match the case in the filesystem, if there are multiple files that
1186 # match the case in the filesystem, if there are multiple files that
1170 # normalize to the same path.
1187 # normalize to the same path.
1171 if match.isexact() and self._checkcase:
1188 if match.isexact() and self._checkcase:
1172 normed = {}
1189 normed = {}
1173
1190
1174 for f, st in pycompat.iteritems(results):
1191 for f, st in pycompat.iteritems(results):
1175 if st is None:
1192 if st is None:
1176 continue
1193 continue
1177
1194
1178 nc = util.normcase(f)
1195 nc = util.normcase(f)
1179 paths = normed.get(nc)
1196 paths = normed.get(nc)
1180
1197
1181 if paths is None:
1198 if paths is None:
1182 paths = set()
1199 paths = set()
1183 normed[nc] = paths
1200 normed[nc] = paths
1184
1201
1185 paths.add(f)
1202 paths.add(f)
1186
1203
1187 for norm, paths in pycompat.iteritems(normed):
1204 for norm, paths in pycompat.iteritems(normed):
1188 if len(paths) > 1:
1205 if len(paths) > 1:
1189 for path in paths:
1206 for path in paths:
1190 folded = self._discoverpath(
1207 folded = self._discoverpath(
1191 path, norm, True, None, self._map.dirfoldmap
1208 path, norm, True, None, self._map.dirfoldmap
1192 )
1209 )
1193 if path != folded:
1210 if path != folded:
1194 results[path] = None
1211 results[path] = None
1195
1212
1196 return results, dirsfound, dirsnotfound
1213 return results, dirsfound, dirsnotfound
1197
1214
1198 def walk(self, match, subrepos, unknown, ignored, full=True):
1215 def walk(self, match, subrepos, unknown, ignored, full=True):
1199 """
1216 """
1200 Walk recursively through the directory tree, finding all files
1217 Walk recursively through the directory tree, finding all files
1201 matched by match.
1218 matched by match.
1202
1219
1203 If full is False, maybe skip some known-clean files.
1220 If full is False, maybe skip some known-clean files.
1204
1221
1205 Return a dict mapping filename to stat-like object (either
1222 Return a dict mapping filename to stat-like object (either
1206 mercurial.osutil.stat instance or return value of os.stat()).
1223 mercurial.osutil.stat instance or return value of os.stat()).
1207
1224
1208 """
1225 """
1209 # full is a flag that extensions that hook into walk can use -- this
1226 # full is a flag that extensions that hook into walk can use -- this
1210 # implementation doesn't use it at all. This satisfies the contract
1227 # implementation doesn't use it at all. This satisfies the contract
1211 # because we only guarantee a "maybe".
1228 # because we only guarantee a "maybe".
1212
1229
1213 if ignored:
1230 if ignored:
1214 ignore = util.never
1231 ignore = util.never
1215 dirignore = util.never
1232 dirignore = util.never
1216 elif unknown:
1233 elif unknown:
1217 ignore = self._ignore
1234 ignore = self._ignore
1218 dirignore = self._dirignore
1235 dirignore = self._dirignore
1219 else:
1236 else:
1220 # if not unknown and not ignored, drop dir recursion and step 2
1237 # if not unknown and not ignored, drop dir recursion and step 2
1221 ignore = util.always
1238 ignore = util.always
1222 dirignore = util.always
1239 dirignore = util.always
1223
1240
1224 matchfn = match.matchfn
1241 matchfn = match.matchfn
1225 matchalways = match.always()
1242 matchalways = match.always()
1226 matchtdir = match.traversedir
1243 matchtdir = match.traversedir
1227 dmap = self._map
1244 dmap = self._map
1228 listdir = util.listdir
1245 listdir = util.listdir
1229 lstat = os.lstat
1246 lstat = os.lstat
1230 dirkind = stat.S_IFDIR
1247 dirkind = stat.S_IFDIR
1231 regkind = stat.S_IFREG
1248 regkind = stat.S_IFREG
1232 lnkkind = stat.S_IFLNK
1249 lnkkind = stat.S_IFLNK
1233 join = self._join
1250 join = self._join
1234
1251
1235 exact = skipstep3 = False
1252 exact = skipstep3 = False
1236 if match.isexact(): # match.exact
1253 if match.isexact(): # match.exact
1237 exact = True
1254 exact = True
1238 dirignore = util.always # skip step 2
1255 dirignore = util.always # skip step 2
1239 elif match.prefix(): # match.match, no patterns
1256 elif match.prefix(): # match.match, no patterns
1240 skipstep3 = True
1257 skipstep3 = True
1241
1258
1242 if not exact and self._checkcase:
1259 if not exact and self._checkcase:
1243 normalize = self._normalize
1260 normalize = self._normalize
1244 normalizefile = self._normalizefile
1261 normalizefile = self._normalizefile
1245 skipstep3 = False
1262 skipstep3 = False
1246 else:
1263 else:
1247 normalize = self._normalize
1264 normalize = self._normalize
1248 normalizefile = None
1265 normalizefile = None
1249
1266
1250 # step 1: find all explicit files
1267 # step 1: find all explicit files
1251 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1268 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1252 if matchtdir:
1269 if matchtdir:
1253 for d in work:
1270 for d in work:
1254 matchtdir(d[0])
1271 matchtdir(d[0])
1255 for d in dirsnotfound:
1272 for d in dirsnotfound:
1256 matchtdir(d)
1273 matchtdir(d)
1257
1274
1258 skipstep3 = skipstep3 and not (work or dirsnotfound)
1275 skipstep3 = skipstep3 and not (work or dirsnotfound)
1259 work = [d for d in work if not dirignore(d[0])]
1276 work = [d for d in work if not dirignore(d[0])]
1260
1277
1261 # step 2: visit subdirectories
1278 # step 2: visit subdirectories
1262 def traverse(work, alreadynormed):
1279 def traverse(work, alreadynormed):
1263 wadd = work.append
1280 wadd = work.append
1264 while work:
1281 while work:
1265 tracing.counter('dirstate.walk work', len(work))
1282 tracing.counter('dirstate.walk work', len(work))
1266 nd = work.pop()
1283 nd = work.pop()
1267 visitentries = match.visitchildrenset(nd)
1284 visitentries = match.visitchildrenset(nd)
1268 if not visitentries:
1285 if not visitentries:
1269 continue
1286 continue
1270 if visitentries == b'this' or visitentries == b'all':
1287 if visitentries == b'this' or visitentries == b'all':
1271 visitentries = None
1288 visitentries = None
1272 skip = None
1289 skip = None
1273 if nd != b'':
1290 if nd != b'':
1274 skip = b'.hg'
1291 skip = b'.hg'
1275 try:
1292 try:
1276 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1293 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1277 entries = listdir(join(nd), stat=True, skip=skip)
1294 entries = listdir(join(nd), stat=True, skip=skip)
1278 except OSError as inst:
1295 except OSError as inst:
1279 if inst.errno in (errno.EACCES, errno.ENOENT):
1296 if inst.errno in (errno.EACCES, errno.ENOENT):
1280 match.bad(
1297 match.bad(
1281 self.pathto(nd), encoding.strtolocal(inst.strerror)
1298 self.pathto(nd), encoding.strtolocal(inst.strerror)
1282 )
1299 )
1283 continue
1300 continue
1284 raise
1301 raise
1285 for f, kind, st in entries:
1302 for f, kind, st in entries:
1286 # Some matchers may return files in the visitentries set,
1303 # Some matchers may return files in the visitentries set,
1287 # instead of 'this', if the matcher explicitly mentions them
1304 # instead of 'this', if the matcher explicitly mentions them
1288 # and is not an exactmatcher. This is acceptable; we do not
1305 # and is not an exactmatcher. This is acceptable; we do not
1289 # make any hard assumptions about file-or-directory below
1306 # make any hard assumptions about file-or-directory below
1290 # based on the presence of `f` in visitentries. If
1307 # based on the presence of `f` in visitentries. If
1291 # visitchildrenset returned a set, we can always skip the
1308 # visitchildrenset returned a set, we can always skip the
1292 # entries *not* in the set it provided regardless of whether
1309 # entries *not* in the set it provided regardless of whether
1293 # they're actually a file or a directory.
1310 # they're actually a file or a directory.
1294 if visitentries and f not in visitentries:
1311 if visitentries and f not in visitentries:
1295 continue
1312 continue
1296 if normalizefile:
1313 if normalizefile:
1297 # even though f might be a directory, we're only
1314 # even though f might be a directory, we're only
1298 # interested in comparing it to files currently in the
1315 # interested in comparing it to files currently in the
1299 # dmap -- therefore normalizefile is enough
1316 # dmap -- therefore normalizefile is enough
1300 nf = normalizefile(
1317 nf = normalizefile(
1301 nd and (nd + b"/" + f) or f, True, True
1318 nd and (nd + b"/" + f) or f, True, True
1302 )
1319 )
1303 else:
1320 else:
1304 nf = nd and (nd + b"/" + f) or f
1321 nf = nd and (nd + b"/" + f) or f
1305 if nf not in results:
1322 if nf not in results:
1306 if kind == dirkind:
1323 if kind == dirkind:
1307 if not ignore(nf):
1324 if not ignore(nf):
1308 if matchtdir:
1325 if matchtdir:
1309 matchtdir(nf)
1326 matchtdir(nf)
1310 wadd(nf)
1327 wadd(nf)
1311 if nf in dmap and (matchalways or matchfn(nf)):
1328 if nf in dmap and (matchalways or matchfn(nf)):
1312 results[nf] = None
1329 results[nf] = None
1313 elif kind == regkind or kind == lnkkind:
1330 elif kind == regkind or kind == lnkkind:
1314 if nf in dmap:
1331 if nf in dmap:
1315 if matchalways or matchfn(nf):
1332 if matchalways or matchfn(nf):
1316 results[nf] = st
1333 results[nf] = st
1317 elif (matchalways or matchfn(nf)) and not ignore(
1334 elif (matchalways or matchfn(nf)) and not ignore(
1318 nf
1335 nf
1319 ):
1336 ):
1320 # unknown file -- normalize if necessary
1337 # unknown file -- normalize if necessary
1321 if not alreadynormed:
1338 if not alreadynormed:
1322 nf = normalize(nf, False, True)
1339 nf = normalize(nf, False, True)
1323 results[nf] = st
1340 results[nf] = st
1324 elif nf in dmap and (matchalways or matchfn(nf)):
1341 elif nf in dmap and (matchalways or matchfn(nf)):
1325 results[nf] = None
1342 results[nf] = None
1326
1343
1327 for nd, d in work:
1344 for nd, d in work:
1328 # alreadynormed means that processwork doesn't have to do any
1345 # alreadynormed means that processwork doesn't have to do any
1329 # expensive directory normalization
1346 # expensive directory normalization
1330 alreadynormed = not normalize or nd == d
1347 alreadynormed = not normalize or nd == d
1331 traverse([d], alreadynormed)
1348 traverse([d], alreadynormed)
1332
1349
1333 for s in subrepos:
1350 for s in subrepos:
1334 del results[s]
1351 del results[s]
1335 del results[b'.hg']
1352 del results[b'.hg']
1336
1353
1337 # step 3: visit remaining files from dmap
1354 # step 3: visit remaining files from dmap
1338 if not skipstep3 and not exact:
1355 if not skipstep3 and not exact:
1339 # If a dmap file is not in results yet, it was either
1356 # If a dmap file is not in results yet, it was either
1340 # a) not matching matchfn b) ignored, c) missing, or d) under a
1357 # a) not matching matchfn b) ignored, c) missing, or d) under a
1341 # symlink directory.
1358 # symlink directory.
1342 if not results and matchalways:
1359 if not results and matchalways:
1343 visit = [f for f in dmap]
1360 visit = [f for f in dmap]
1344 else:
1361 else:
1345 visit = [f for f in dmap if f not in results and matchfn(f)]
1362 visit = [f for f in dmap if f not in results and matchfn(f)]
1346 visit.sort()
1363 visit.sort()
1347
1364
1348 if unknown:
1365 if unknown:
1349 # unknown == True means we walked all dirs under the roots
1366 # unknown == True means we walked all dirs under the roots
1350 # that wasn't ignored, and everything that matched was stat'ed
1367 # that wasn't ignored, and everything that matched was stat'ed
1351 # and is already in results.
1368 # and is already in results.
1352 # The rest must thus be ignored or under a symlink.
1369 # The rest must thus be ignored or under a symlink.
1353 audit_path = pathutil.pathauditor(self._root, cached=True)
1370 audit_path = pathutil.pathauditor(self._root, cached=True)
1354
1371
1355 for nf in iter(visit):
1372 for nf in iter(visit):
1356 # If a stat for the same file was already added with a
1373 # If a stat for the same file was already added with a
1357 # different case, don't add one for this, since that would
1374 # different case, don't add one for this, since that would
1358 # make it appear as if the file exists under both names
1375 # make it appear as if the file exists under both names
1359 # on disk.
1376 # on disk.
1360 if (
1377 if (
1361 normalizefile
1378 normalizefile
1362 and normalizefile(nf, True, True) in results
1379 and normalizefile(nf, True, True) in results
1363 ):
1380 ):
1364 results[nf] = None
1381 results[nf] = None
1365 # Report ignored items in the dmap as long as they are not
1382 # Report ignored items in the dmap as long as they are not
1366 # under a symlink directory.
1383 # under a symlink directory.
1367 elif audit_path.check(nf):
1384 elif audit_path.check(nf):
1368 try:
1385 try:
1369 results[nf] = lstat(join(nf))
1386 results[nf] = lstat(join(nf))
1370 # file was just ignored, no links, and exists
1387 # file was just ignored, no links, and exists
1371 except OSError:
1388 except OSError:
1372 # file doesn't exist
1389 # file doesn't exist
1373 results[nf] = None
1390 results[nf] = None
1374 else:
1391 else:
1375 # It's either missing or under a symlink directory
1392 # It's either missing or under a symlink directory
1376 # which we in this case report as missing
1393 # which we in this case report as missing
1377 results[nf] = None
1394 results[nf] = None
1378 else:
1395 else:
1379 # We may not have walked the full directory tree above,
1396 # We may not have walked the full directory tree above,
1380 # so stat and check everything we missed.
1397 # so stat and check everything we missed.
1381 iv = iter(visit)
1398 iv = iter(visit)
1382 for st in util.statfiles([join(i) for i in visit]):
1399 for st in util.statfiles([join(i) for i in visit]):
1383 results[next(iv)] = st
1400 results[next(iv)] = st
1384 return results
1401 return results
1385
1402
1386 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1403 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1387 # Force Rayon (Rust parallelism library) to respect the number of
1404 # Force Rayon (Rust parallelism library) to respect the number of
1388 # workers. This is a temporary workaround until Rust code knows
1405 # workers. This is a temporary workaround until Rust code knows
1389 # how to read the config file.
1406 # how to read the config file.
1390 numcpus = self._ui.configint(b"worker", b"numcpus")
1407 numcpus = self._ui.configint(b"worker", b"numcpus")
1391 if numcpus is not None:
1408 if numcpus is not None:
1392 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1409 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1393
1410
1394 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1411 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1395 if not workers_enabled:
1412 if not workers_enabled:
1396 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1413 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1397
1414
1398 (
1415 (
1399 lookup,
1416 lookup,
1400 modified,
1417 modified,
1401 added,
1418 added,
1402 removed,
1419 removed,
1403 deleted,
1420 deleted,
1404 clean,
1421 clean,
1405 ignored,
1422 ignored,
1406 unknown,
1423 unknown,
1407 warnings,
1424 warnings,
1408 bad,
1425 bad,
1409 traversed,
1426 traversed,
1410 dirty,
1427 dirty,
1411 ) = rustmod.status(
1428 ) = rustmod.status(
1412 self._map._rustmap,
1429 self._map._rustmap,
1413 matcher,
1430 matcher,
1414 self._rootdir,
1431 self._rootdir,
1415 self._ignorefiles(),
1432 self._ignorefiles(),
1416 self._checkexec,
1433 self._checkexec,
1417 self._lastnormaltime,
1434 self._lastnormaltime,
1418 bool(list_clean),
1435 bool(list_clean),
1419 bool(list_ignored),
1436 bool(list_ignored),
1420 bool(list_unknown),
1437 bool(list_unknown),
1421 bool(matcher.traversedir),
1438 bool(matcher.traversedir),
1422 )
1439 )
1423
1440
1424 self._dirty |= dirty
1441 self._dirty |= dirty
1425
1442
1426 if matcher.traversedir:
1443 if matcher.traversedir:
1427 for dir in traversed:
1444 for dir in traversed:
1428 matcher.traversedir(dir)
1445 matcher.traversedir(dir)
1429
1446
1430 if self._ui.warn:
1447 if self._ui.warn:
1431 for item in warnings:
1448 for item in warnings:
1432 if isinstance(item, tuple):
1449 if isinstance(item, tuple):
1433 file_path, syntax = item
1450 file_path, syntax = item
1434 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1451 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1435 file_path,
1452 file_path,
1436 syntax,
1453 syntax,
1437 )
1454 )
1438 self._ui.warn(msg)
1455 self._ui.warn(msg)
1439 else:
1456 else:
1440 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1457 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1441 self._ui.warn(
1458 self._ui.warn(
1442 msg
1459 msg
1443 % (
1460 % (
1444 pathutil.canonpath(
1461 pathutil.canonpath(
1445 self._rootdir, self._rootdir, item
1462 self._rootdir, self._rootdir, item
1446 ),
1463 ),
1447 b"No such file or directory",
1464 b"No such file or directory",
1448 )
1465 )
1449 )
1466 )
1450
1467
1451 for (fn, message) in bad:
1468 for (fn, message) in bad:
1452 matcher.bad(fn, encoding.strtolocal(message))
1469 matcher.bad(fn, encoding.strtolocal(message))
1453
1470
1454 status = scmutil.status(
1471 status = scmutil.status(
1455 modified=modified,
1472 modified=modified,
1456 added=added,
1473 added=added,
1457 removed=removed,
1474 removed=removed,
1458 deleted=deleted,
1475 deleted=deleted,
1459 unknown=unknown,
1476 unknown=unknown,
1460 ignored=ignored,
1477 ignored=ignored,
1461 clean=clean,
1478 clean=clean,
1462 )
1479 )
1463 return (lookup, status)
1480 return (lookup, status)
1464
1481
1465 def status(self, match, subrepos, ignored, clean, unknown):
1482 def status(self, match, subrepos, ignored, clean, unknown):
1466 """Determine the status of the working copy relative to the
1483 """Determine the status of the working copy relative to the
1467 dirstate and return a pair of (unsure, status), where status is of type
1484 dirstate and return a pair of (unsure, status), where status is of type
1468 scmutil.status and:
1485 scmutil.status and:
1469
1486
1470 unsure:
1487 unsure:
1471 files that might have been modified since the dirstate was
1488 files that might have been modified since the dirstate was
1472 written, but need to be read to be sure (size is the same
1489 written, but need to be read to be sure (size is the same
1473 but mtime differs)
1490 but mtime differs)
1474 status.modified:
1491 status.modified:
1475 files that have definitely been modified since the dirstate
1492 files that have definitely been modified since the dirstate
1476 was written (different size or mode)
1493 was written (different size or mode)
1477 status.clean:
1494 status.clean:
1478 files that have definitely not been modified since the
1495 files that have definitely not been modified since the
1479 dirstate was written
1496 dirstate was written
1480 """
1497 """
1481 listignored, listclean, listunknown = ignored, clean, unknown
1498 listignored, listclean, listunknown = ignored, clean, unknown
1482 lookup, modified, added, unknown, ignored = [], [], [], [], []
1499 lookup, modified, added, unknown, ignored = [], [], [], [], []
1483 removed, deleted, clean = [], [], []
1500 removed, deleted, clean = [], [], []
1484
1501
1485 dmap = self._map
1502 dmap = self._map
1486 dmap.preload()
1503 dmap.preload()
1487
1504
1488 use_rust = True
1505 use_rust = True
1489
1506
1490 allowed_matchers = (
1507 allowed_matchers = (
1491 matchmod.alwaysmatcher,
1508 matchmod.alwaysmatcher,
1492 matchmod.exactmatcher,
1509 matchmod.exactmatcher,
1493 matchmod.includematcher,
1510 matchmod.includematcher,
1494 )
1511 )
1495
1512
1496 if rustmod is None:
1513 if rustmod is None:
1497 use_rust = False
1514 use_rust = False
1498 elif self._checkcase:
1515 elif self._checkcase:
1499 # Case-insensitive filesystems are not handled yet
1516 # Case-insensitive filesystems are not handled yet
1500 use_rust = False
1517 use_rust = False
1501 elif subrepos:
1518 elif subrepos:
1502 use_rust = False
1519 use_rust = False
1503 elif sparse.enabled:
1520 elif sparse.enabled:
1504 use_rust = False
1521 use_rust = False
1505 elif not isinstance(match, allowed_matchers):
1522 elif not isinstance(match, allowed_matchers):
1506 # Some matchers have yet to be implemented
1523 # Some matchers have yet to be implemented
1507 use_rust = False
1524 use_rust = False
1508
1525
1509 if use_rust:
1526 if use_rust:
1510 try:
1527 try:
1511 return self._rust_status(
1528 return self._rust_status(
1512 match, listclean, listignored, listunknown
1529 match, listclean, listignored, listunknown
1513 )
1530 )
1514 except rustmod.FallbackError:
1531 except rustmod.FallbackError:
1515 pass
1532 pass
1516
1533
1517 def noop(f):
1534 def noop(f):
1518 pass
1535 pass
1519
1536
1520 dcontains = dmap.__contains__
1537 dcontains = dmap.__contains__
1521 dget = dmap.__getitem__
1538 dget = dmap.__getitem__
1522 ladd = lookup.append # aka "unsure"
1539 ladd = lookup.append # aka "unsure"
1523 madd = modified.append
1540 madd = modified.append
1524 aadd = added.append
1541 aadd = added.append
1525 uadd = unknown.append if listunknown else noop
1542 uadd = unknown.append if listunknown else noop
1526 iadd = ignored.append if listignored else noop
1543 iadd = ignored.append if listignored else noop
1527 radd = removed.append
1544 radd = removed.append
1528 dadd = deleted.append
1545 dadd = deleted.append
1529 cadd = clean.append if listclean else noop
1546 cadd = clean.append if listclean else noop
1530 mexact = match.exact
1547 mexact = match.exact
1531 dirignore = self._dirignore
1548 dirignore = self._dirignore
1532 checkexec = self._checkexec
1549 checkexec = self._checkexec
1533 copymap = self._map.copymap
1550 copymap = self._map.copymap
1534 lastnormaltime = self._lastnormaltime
1551 lastnormaltime = self._lastnormaltime
1535
1552
1536 # We need to do full walks when either
1553 # We need to do full walks when either
1537 # - we're listing all clean files, or
1554 # - we're listing all clean files, or
1538 # - match.traversedir does something, because match.traversedir should
1555 # - match.traversedir does something, because match.traversedir should
1539 # be called for every dir in the working dir
1556 # be called for every dir in the working dir
1540 full = listclean or match.traversedir is not None
1557 full = listclean or match.traversedir is not None
1541 for fn, st in pycompat.iteritems(
1558 for fn, st in pycompat.iteritems(
1542 self.walk(match, subrepos, listunknown, listignored, full=full)
1559 self.walk(match, subrepos, listunknown, listignored, full=full)
1543 ):
1560 ):
1544 if not dcontains(fn):
1561 if not dcontains(fn):
1545 if (listignored or mexact(fn)) and dirignore(fn):
1562 if (listignored or mexact(fn)) and dirignore(fn):
1546 if listignored:
1563 if listignored:
1547 iadd(fn)
1564 iadd(fn)
1548 else:
1565 else:
1549 uadd(fn)
1566 uadd(fn)
1550 continue
1567 continue
1551
1568
1552 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1569 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1553 # written like that for performance reasons. dmap[fn] is not a
1570 # written like that for performance reasons. dmap[fn] is not a
1554 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1571 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1555 # opcode has fast paths when the value to be unpacked is a tuple or
1572 # opcode has fast paths when the value to be unpacked is a tuple or
1556 # a list, but falls back to creating a full-fledged iterator in
1573 # a list, but falls back to creating a full-fledged iterator in
1557 # general. That is much slower than simply accessing and storing the
1574 # general. That is much slower than simply accessing and storing the
1558 # tuple members one by one.
1575 # tuple members one by one.
1559 t = dget(fn)
1576 t = dget(fn)
1560 mode = t.mode
1577 mode = t.mode
1561 size = t.size
1578 size = t.size
1562 time = t.mtime
1579 time = t.mtime
1563
1580
1564 if not st and t.tracked:
1581 if not st and t.tracked:
1565 dadd(fn)
1582 dadd(fn)
1566 elif t.merged:
1583 elif t.merged:
1567 madd(fn)
1584 madd(fn)
1568 elif t.added:
1585 elif t.added:
1569 aadd(fn)
1586 aadd(fn)
1570 elif t.removed:
1587 elif t.removed:
1571 radd(fn)
1588 radd(fn)
1572 elif t.tracked:
1589 elif t.tracked:
1573 if (
1590 if (
1574 size >= 0
1591 size >= 0
1575 and (
1592 and (
1576 (size != st.st_size and size != st.st_size & _rangemask)
1593 (size != st.st_size and size != st.st_size & _rangemask)
1577 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1594 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1578 )
1595 )
1579 or t.from_p2
1596 or t.from_p2
1580 or fn in copymap
1597 or fn in copymap
1581 ):
1598 ):
1582 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1599 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1583 # issue6456: Size returned may be longer due to
1600 # issue6456: Size returned may be longer due to
1584 # encryption on EXT-4 fscrypt, undecided.
1601 # encryption on EXT-4 fscrypt, undecided.
1585 ladd(fn)
1602 ladd(fn)
1586 else:
1603 else:
1587 madd(fn)
1604 madd(fn)
1588 elif (
1605 elif (
1589 time != st[stat.ST_MTIME]
1606 time != st[stat.ST_MTIME]
1590 and time != st[stat.ST_MTIME] & _rangemask
1607 and time != st[stat.ST_MTIME] & _rangemask
1591 ):
1608 ):
1592 ladd(fn)
1609 ladd(fn)
1593 elif st[stat.ST_MTIME] == lastnormaltime:
1610 elif st[stat.ST_MTIME] == lastnormaltime:
1594 # fn may have just been marked as normal and it may have
1611 # fn may have just been marked as normal and it may have
1595 # changed in the same second without changing its size.
1612 # changed in the same second without changing its size.
1596 # This can happen if we quickly do multiple commits.
1613 # This can happen if we quickly do multiple commits.
1597 # Force lookup, so we don't miss such a racy file change.
1614 # Force lookup, so we don't miss such a racy file change.
1598 ladd(fn)
1615 ladd(fn)
1599 elif listclean:
1616 elif listclean:
1600 cadd(fn)
1617 cadd(fn)
1601 status = scmutil.status(
1618 status = scmutil.status(
1602 modified, added, removed, deleted, unknown, ignored, clean
1619 modified, added, removed, deleted, unknown, ignored, clean
1603 )
1620 )
1604 return (lookup, status)
1621 return (lookup, status)
1605
1622
def matches(self, match):
    """
    return files in the dirstate (in whatever state) filtered by match
    """
    dmap = self._map
    if rustmod is not None:
        # the Rust implementation keeps the actual mapping one level down
        dmap = self._map._rustmap

    if match.always():
        # no filtering at all: every tracked path qualifies
        return dmap.keys()
    candidates = match.files()
    if match.isexact():
        # fast path -- iterate the (typically much smaller) pattern set
        # and keep only the entries the dirstate knows about
        return [f for f in candidates if f in dmap]
    if match.prefix() and all(f in dmap for f in candidates):
        # fast path -- every pattern is itself a tracked file, so the
        # answer is exactly the pattern list
        return list(candidates)
    # slow path: test every dirstate entry against the matcher
    return [f for f in dmap if match(f)]
1626
1643
1627 def _actualfilename(self, tr):
1644 def _actualfilename(self, tr):
1628 if tr:
1645 if tr:
1629 return self._pendingfilename
1646 return self._pendingfilename
1630 else:
1647 else:
1631 return self._filename
1648 return self._filename
1632
1649
def savebackup(self, tr, backupname):
    '''Save current dirstate into backup file

    ``tr`` may be None; when a transaction is given, the pending
    dirstate file is backed up instead and hooked into the
    transaction's write/rollback machinery.
    '''
    # pending file while a transaction runs, plain dirstate otherwise
    filename = self._actualfilename(tr)
    assert backupname != filename

    # use '_writedirstate' instead of 'write' to write changes certainly,
    # because the latter omits writing out if transaction is running.
    # output file will be used to create backup of dirstate at this point.
    if self._dirty or not self._opener.exists(filename):
        self._writedirstate(
            tr,
            self._opener(filename, b"w", atomictemp=True, checkambig=True),
        )

    if tr:
        # ensure that subsequent tr.writepending returns True for
        # changes written out above, even if dirstate is never
        # changed after this
        tr.addfilegenerator(
            b'dirstate',
            (self._filename,),
            lambda f: self._writedirstate(tr, f),
            location=b'plain',
        )

        # ensure that pending file written above is unlinked at
        # failure, even if tr.writepending isn't invoked until the
        # end of this transaction
        tr.registertmp(filename, location=b'plain')

    # remove any stale backup before (re)creating it below
    self._opener.tryunlink(backupname)
    # hardlink backup is okay because _writedirstate is always called
    # with an "atomictemp=True" file.
    util.copyfile(
        self._opener.join(filename),
        self._opener.join(backupname),
        hardlink=True,
    )
1671
1688
def restorebackup(self, tr, backupname):
    """Replace the current dirstate with the saved backup file."""
    # Drop all in-memory state first: otherwise "wlock.release()" could
    # write stale dirstate changes right over the file we just restored.
    self.invalidate()
    filename = self._actualfilename(tr)
    opener = self._opener
    backup_path = opener.join(backupname)
    target_path = opener.join(filename)
    if util.samefile(backup_path, target_path):
        # the backup already is the live file (e.g. hardlinked), so there
        # is nothing to move -- just drop the extra name
        opener.unlink(backupname)
    else:
        opener.rename(backupname, filename, checkambig=True)
1683
1700
def clearbackup(self, tr, backupname):
    """Delete the dirstate backup file (``tr`` is accepted but unused)."""
    opener = self._opener
    opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now