##// END OF EJS Templates
dirstate: directly call the dirstatemap in `set_untracked`...
marmoute -
r48669:1c797757 default
parent child Browse files
Show More
@@ -1,1780 +1,1782 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """A filecache variant for files that live under ``.hg/``."""

    def join(self, obj, fname):
        # Resolve fname relative to the repository's metadata directory.
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """A filecache variant for files in the repository root."""

    def join(self, obj, fname):
        # Resolve fname relative to the working-directory root.
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator enforcing that *func* runs inside a parentchange context.

    Raises ``error.ProgrammingError`` when the wrapped method is invoked
    while no dirstate parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator enforcing that *func* runs OUTSIDE a parentchange context.

    Raises ``error.ProgrammingError`` when the wrapped method is invoked
    while a dirstate parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._updatedfiles = set()
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
242 if self._checklink and self._checkexec:
243
243
244 def f(x):
244 def f(x):
245 try:
245 try:
246 st = os.lstat(self._join(x))
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
247 if util.statislink(st):
248 return b'l'
248 return b'l'
249 if util.statisexec(st):
249 if util.statisexec(st):
250 return b'x'
250 return b'x'
251 except OSError:
251 except OSError:
252 pass
252 pass
253 return b''
253 return b''
254
254
255 return f
255 return f
256
256
257 fallback = buildfallback()
257 fallback = buildfallback()
258 if self._checklink:
258 if self._checklink:
259
259
260 def f(x):
260 def f(x):
261 if os.path.islink(self._join(x)):
261 if os.path.islink(self._join(x)):
262 return b'l'
262 return b'l'
263 if b'x' in fallback(x):
263 if b'x' in fallback(x):
264 return b'x'
264 return b'x'
265 return b''
265 return b''
266
266
267 return f
267 return f
268 if self._checkexec:
268 if self._checkexec:
269
269
270 def f(x):
270 def f(x):
271 if b'l' in fallback(x):
271 if b'l' in fallback(x):
272 return b'l'
272 return b'l'
273 if util.isexec(self._join(x)):
273 if util.isexec(self._join(x)):
274 return b'x'
274 return b'x'
275 return b''
275 return b''
276
276
277 return f
277 return f
278 else:
278 else:
279 return fallback
279 return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries a
370 When moving from two parents to one, "merged" entries a
371 adjusted to normal and previous copy records discarded and
371 adjusted to normal and previous copy records discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 self._map.setparents(p1, p2)
388 self._map.setparents(p1, p2)
389 copies = {}
389 copies = {}
390 if (
390 if (
391 oldp2 != self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
395
396 for f in candidatefiles:
396 for f in candidatefiles:
397 s = self._map.get(f)
397 s = self._map.get(f)
398 if s is None:
398 if s is None:
399 continue
399 continue
400
400
401 # Discard "merged" markers when moving away from a merge state
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
402 if s.merged:
403 source = self._map.copymap.get(f)
403 source = self._map.copymap.get(f)
404 if source:
404 if source:
405 copies[f] = source
405 copies[f] = source
406 self._normallookup(f)
406 self._normallookup(f)
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._add(f)
413 return copies
413 return copies
414
414
415 def setbranch(self, branch):
415 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 try:
418 try:
419 f.write(self._branch + b'\n')
419 f.write(self._branch + b'\n')
420 f.close()
420 f.close()
421
421
422 # make sure filecache has the correct stat info for _branch after
422 # make sure filecache has the correct stat info for _branch after
423 # replacing the underlying file
423 # replacing the underlying file
424 ce = self._filecache[b'_branch']
424 ce = self._filecache[b'_branch']
425 if ce:
425 if ce:
426 ce.refresh()
426 ce.refresh()
427 except: # re-raises
427 except: # re-raises
428 f.discard()
428 f.discard()
429 raise
429 raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of "update/merge" case. For
469 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True the file was previously untracked, False otherwise.
472 return True the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self._normallookup(filename)
479 self._normallookup(filename)
480 return True
480 return True
481 # XXX This is probably overkill for more case, but we need this to
481 # XXX This is probably overkill for more case, but we need this to
482 # fully replace the `normallookup` call with `set_tracked` one.
482 # fully replace the `normallookup` call with `set_tracked` one.
483 # Consider smoothing this in the future.
483 # Consider smoothing this in the future.
484 self.set_possibly_dirty(filename)
484 self.set_possibly_dirty(filename)
485 return False
485 return False
486
486
487 @requires_no_parents_change
487 @requires_no_parents_change
488 def set_untracked(self, filename):
488 def set_untracked(self, filename):
489 """a "public" method for generic code to mark a file as untracked
489 """a "public" method for generic code to mark a file as untracked
490
490
491 This function is to be called outside of "update/merge" case. For
491 This function is to be called outside of "update/merge" case. For
492 example by a command like `hg remove X`.
492 example by a command like `hg remove X`.
493
493
494 return True the file was previously tracked, False otherwise.
494 return True the file was previously tracked, False otherwise.
495 """
495 """
496 entry = self._map.get(filename)
496 entry = self._map.get(filename)
497 if entry is None:
497 if entry is None:
498 return False
498 return False
499 elif entry.added:
499 elif entry.added:
500 self._drop(filename)
500 self._drop(filename)
501 return True
501 return True
502 else:
502 else:
503 self._remove(filename)
503 self._dirty = True
504 self._updatedfiles.add(filename)
505 self._map.removefile(filename, in_merge=self.in_merge)
504 return True
506 return True
505
507
506 @requires_no_parents_change
508 @requires_no_parents_change
507 def set_clean(self, filename, parentfiledata=None):
509 def set_clean(self, filename, parentfiledata=None):
508 """record that the current state of the file on disk is known to be clean"""
510 """record that the current state of the file on disk is known to be clean"""
509 self._dirty = True
511 self._dirty = True
510 self._updatedfiles.add(filename)
512 self._updatedfiles.add(filename)
511 self._normal(filename, parentfiledata=parentfiledata)
513 self._normal(filename, parentfiledata=parentfiledata)
512
514
513 @requires_no_parents_change
515 @requires_no_parents_change
514 def set_possibly_dirty(self, filename):
516 def set_possibly_dirty(self, filename):
515 """record that the current state of the file on disk is unknown"""
517 """record that the current state of the file on disk is unknown"""
516 self._dirty = True
518 self._dirty = True
517 self._updatedfiles.add(filename)
519 self._updatedfiles.add(filename)
518 self._map.set_possibly_dirty(filename)
520 self._map.set_possibly_dirty(filename)
519
521
520 @requires_parents_change
522 @requires_parents_change
521 def update_file_p1(
523 def update_file_p1(
522 self,
524 self,
523 filename,
525 filename,
524 p1_tracked,
526 p1_tracked,
525 ):
527 ):
526 """Set a file as tracked in the parent (or not)
528 """Set a file as tracked in the parent (or not)
527
529
528 This is to be called when adjust the dirstate to a new parent after an history
530 This is to be called when adjust the dirstate to a new parent after an history
529 rewriting operation.
531 rewriting operation.
530
532
531 It should not be called during a merge (p2 != nullid) and only within
533 It should not be called during a merge (p2 != nullid) and only within
532 a `with dirstate.parentchange():` context.
534 a `with dirstate.parentchange():` context.
533 """
535 """
534 if self.in_merge:
536 if self.in_merge:
535 msg = b'update_file_reference should not be called when merging'
537 msg = b'update_file_reference should not be called when merging'
536 raise error.ProgrammingError(msg)
538 raise error.ProgrammingError(msg)
537 entry = self._map.get(filename)
539 entry = self._map.get(filename)
538 if entry is None:
540 if entry is None:
539 wc_tracked = False
541 wc_tracked = False
540 else:
542 else:
541 wc_tracked = entry.tracked
543 wc_tracked = entry.tracked
542 possibly_dirty = False
544 possibly_dirty = False
543 if p1_tracked and wc_tracked:
545 if p1_tracked and wc_tracked:
544 # the underlying reference might have changed, we will have to
546 # the underlying reference might have changed, we will have to
545 # check it.
547 # check it.
546 possibly_dirty = True
548 possibly_dirty = True
547 elif not (p1_tracked or wc_tracked):
549 elif not (p1_tracked or wc_tracked):
548 # the file is no longer relevant to anyone
550 # the file is no longer relevant to anyone
549 self._drop(filename)
551 self._drop(filename)
550 elif (not p1_tracked) and wc_tracked:
552 elif (not p1_tracked) and wc_tracked:
551 if entry is not None and entry.added:
553 if entry is not None and entry.added:
552 return # avoid dropping copy information (maybe?)
554 return # avoid dropping copy information (maybe?)
553 elif p1_tracked and not wc_tracked:
555 elif p1_tracked and not wc_tracked:
554 pass
556 pass
555 else:
557 else:
556 assert False, 'unreachable'
558 assert False, 'unreachable'
557
559
558 # this mean we are doing call for file we do not really care about the
560 # this mean we are doing call for file we do not really care about the
559 # data (eg: added or removed), however this should be a minor overhead
561 # data (eg: added or removed), however this should be a minor overhead
560 # compared to the overall update process calling this.
562 # compared to the overall update process calling this.
561 parentfiledata = None
563 parentfiledata = None
562 if wc_tracked:
564 if wc_tracked:
563 parentfiledata = self._get_filedata(filename)
565 parentfiledata = self._get_filedata(filename)
564
566
565 self._updatedfiles.add(filename)
567 self._updatedfiles.add(filename)
566 self._map.reset_state(
568 self._map.reset_state(
567 filename,
569 filename,
568 wc_tracked,
570 wc_tracked,
569 p1_tracked,
571 p1_tracked,
570 possibly_dirty=possibly_dirty,
572 possibly_dirty=possibly_dirty,
571 parentfiledata=parentfiledata,
573 parentfiledata=parentfiledata,
572 )
574 )
573 if (
575 if (
574 parentfiledata is not None
576 parentfiledata is not None
575 and parentfiledata[2] > self._lastnormaltime
577 and parentfiledata[2] > self._lastnormaltime
576 ):
578 ):
577 # Remember the most recent modification timeslot for status(),
579 # Remember the most recent modification timeslot for status(),
578 # to make sure we won't miss future size-preserving file content
580 # to make sure we won't miss future size-preserving file content
579 # modifications that happen within the same timeslot.
581 # modifications that happen within the same timeslot.
580 self._lastnormaltime = parentfiledata[2]
582 self._lastnormaltime = parentfiledata[2]
581
583
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the direstates parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        self._updatedfiles.add(filename)

        # stat data is only useful when the file is clean in both the
        # working copy and p1; dirty/merged/p2 entries ignore it
        need_parent_file_data = (
            not (possibly_dirty or clean_p2 or merged)
            and wc_tracked
            and p1_tracked
        )

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        if need_parent_file_data:
            if parentfiledata is None:
                parentfiledata = self._get_filedata(filename)
            mtime = parentfiledata[2]

            if mtime > self._lastnormaltime:
                # Remember the most recent modification timeslot for
                # status(), to make sure we won't miss future
                # size-preserving file content modifications that happen
                # within the same timeslot.
                self._lastnormaltime = mtime

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_tracked=p2_tracked,
            merged=merged,
            clean_p1=clean_p1,
            clean_p2=clean_p2,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        # NOTE(review): when need_parent_file_data was True this repeats the
        # _lastnormaltime update above with the same mtime; it only has an
        # effect when the caller passed parentfiledata explicitly.
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
657
659
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """common helper recording `f` as tracked with the given flags

        Validates the filename and rejects file/directory name clashes
        before delegating the actual state change to `self._map.addfile`.
        Raises `error.Abort` on a clash.
        """
        entry = self._map.get(f)
        # only (re)validate names that are new to the map: freshly added
        # files, or files previously marked removed
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
697
699
698 def _get_filedata(self, filename):
700 def _get_filedata(self, filename):
699 """returns"""
701 """returns"""
700 s = os.lstat(self._join(filename))
702 s = os.lstat(self._join(filename))
701 mode = s.st_mode
703 mode = s.st_mode
702 size = s.st_size
704 size = s.st_size
703 mtime = s[stat.ST_MTIME]
705 mtime = s[stat.ST_MTIME]
704 return (mode, size, mtime)
706 return (mode, size, mtime)
705
707
706 def normal(self, f, parentfiledata=None):
708 def normal(self, f, parentfiledata=None):
707 """Mark a file normal and clean.
709 """Mark a file normal and clean.
708
710
709 parentfiledata: (mode, size, mtime) of the clean file
711 parentfiledata: (mode, size, mtime) of the clean file
710
712
711 parentfiledata should be computed from memory (for mode,
713 parentfiledata should be computed from memory (for mode,
712 size), as or close as possible from the point where we
714 size), as or close as possible from the point where we
713 determined the file was clean, to limit the risk of the
715 determined the file was clean, to limit the risk of the
714 file having been changed by an external process between the
716 file having been changed by an external process between the
715 moment where the file was determined to be clean and now."""
717 moment where the file was determined to be clean and now."""
716 if self.pendingparentchange():
718 if self.pendingparentchange():
717 util.nouideprecwarn(
719 util.nouideprecwarn(
718 b"do not use `normal` inside of update/merge context."
720 b"do not use `normal` inside of update/merge context."
719 b" Use `update_file` or `update_file_p1`",
721 b" Use `update_file` or `update_file_p1`",
720 b'6.0',
722 b'6.0',
721 stacklevel=2,
723 stacklevel=2,
722 )
724 )
723 else:
725 else:
724 util.nouideprecwarn(
726 util.nouideprecwarn(
725 b"do not use `normal` outside of update/merge context."
727 b"do not use `normal` outside of update/merge context."
726 b" Use `set_tracked`",
728 b" Use `set_tracked`",
727 b'6.0',
729 b'6.0',
728 stacklevel=2,
730 stacklevel=2,
729 )
731 )
730 self._normal(f, parentfiledata=parentfiledata)
732 self._normal(f, parentfiledata=parentfiledata)
731
733
732 def _normal(self, f, parentfiledata=None):
734 def _normal(self, f, parentfiledata=None):
733 if parentfiledata:
735 if parentfiledata:
734 (mode, size, mtime) = parentfiledata
736 (mode, size, mtime) = parentfiledata
735 else:
737 else:
736 (mode, size, mtime) = self._get_filedata(f)
738 (mode, size, mtime) = self._get_filedata(f)
737 self._addpath(f, mode=mode, size=size, mtime=mtime)
739 self._addpath(f, mode=mode, size=size, mtime=mtime)
738 self._map.copymap.pop(f, None)
740 self._map.copymap.pop(f, None)
739 if f in self._map.nonnormalset:
741 if f in self._map.nonnormalset:
740 self._map.nonnormalset.remove(f)
742 self._map.nonnormalset.remove(f)
741 if mtime > self._lastnormaltime:
743 if mtime > self._lastnormaltime:
742 # Remember the most recent modification timeslot for status(),
744 # Remember the most recent modification timeslot for status(),
743 # to make sure we won't miss future size-preserving file content
745 # to make sure we won't miss future size-preserving file content
744 # modifications that happen within the same timeslot.
746 # modifications that happen within the same timeslot.
745 self._lastnormaltime = mtime
747 self._lastnormaltime = mtime
746
748
747 def normallookup(self, f):
749 def normallookup(self, f):
748 '''Mark a file normal, but possibly dirty.'''
750 '''Mark a file normal, but possibly dirty.'''
749 if self.pendingparentchange():
751 if self.pendingparentchange():
750 util.nouideprecwarn(
752 util.nouideprecwarn(
751 b"do not use `normallookup` inside of update/merge context."
753 b"do not use `normallookup` inside of update/merge context."
752 b" Use `update_file` or `update_file_p1`",
754 b" Use `update_file` or `update_file_p1`",
753 b'6.0',
755 b'6.0',
754 stacklevel=2,
756 stacklevel=2,
755 )
757 )
756 else:
758 else:
757 util.nouideprecwarn(
759 util.nouideprecwarn(
758 b"do not use `normallookup` outside of update/merge context."
760 b"do not use `normallookup` outside of update/merge context."
759 b" Use `set_possibly_dirty` or `set_tracked`",
761 b" Use `set_possibly_dirty` or `set_tracked`",
760 b'6.0',
762 b'6.0',
761 stacklevel=2,
763 stacklevel=2,
762 )
764 )
763 self._normallookup(f)
765 self._normallookup(f)
764
766
    def _normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with a a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    # save the copy source: _merge/_otherparent drop it
                    # from copymap, so re-record it afterwards
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self._merge(f)
                    elif entry.from_p2_removed:
                        self._otherparent(f)
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in the desired merge state, nothing to do
                    return
        # default case: track the file but let status() re-check it
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
788
790
789 def otherparent(self, f):
791 def otherparent(self, f):
790 '''Mark as coming from the other parent, always dirty.'''
792 '''Mark as coming from the other parent, always dirty.'''
791 if self.pendingparentchange():
793 if self.pendingparentchange():
792 util.nouideprecwarn(
794 util.nouideprecwarn(
793 b"do not use `otherparent` inside of update/merge context."
795 b"do not use `otherparent` inside of update/merge context."
794 b" Use `update_file` or `update_file_p1`",
796 b" Use `update_file` or `update_file_p1`",
795 b'6.0',
797 b'6.0',
796 stacklevel=2,
798 stacklevel=2,
797 )
799 )
798 else:
800 else:
799 util.nouideprecwarn(
801 util.nouideprecwarn(
800 b"do not use `otherparent` outside of update/merge context."
802 b"do not use `otherparent` outside of update/merge context."
801 b"It should have been set by the update/merge code",
803 b"It should have been set by the update/merge code",
802 b'6.0',
804 b'6.0',
803 stacklevel=2,
805 stacklevel=2,
804 )
806 )
805 self._otherparent(f)
807 self._otherparent(f)
806
808
807 def _otherparent(self, f):
809 def _otherparent(self, f):
808 if not self.in_merge:
810 if not self.in_merge:
809 msg = _(b"setting %r to other parent only allowed in merges") % f
811 msg = _(b"setting %r to other parent only allowed in merges") % f
810 raise error.Abort(msg)
812 raise error.Abort(msg)
811 entry = self._map.get(f)
813 entry = self._map.get(f)
812 if entry is not None and entry.tracked:
814 if entry is not None and entry.tracked:
813 # merge-like
815 # merge-like
814 self._addpath(f, merged=True)
816 self._addpath(f, merged=True)
815 else:
817 else:
816 # add-like
818 # add-like
817 self._addpath(f, from_p2=True)
819 self._addpath(f, from_p2=True)
818 self._map.copymap.pop(f, None)
820 self._map.copymap.pop(f, None)
819
821
820 def add(self, f):
822 def add(self, f):
821 '''Mark a file added.'''
823 '''Mark a file added.'''
822 if self.pendingparentchange():
824 if self.pendingparentchange():
823 util.nouideprecwarn(
825 util.nouideprecwarn(
824 b"do not use `add` inside of update/merge context."
826 b"do not use `add` inside of update/merge context."
825 b" Use `update_file`",
827 b" Use `update_file`",
826 b'6.0',
828 b'6.0',
827 stacklevel=2,
829 stacklevel=2,
828 )
830 )
829 else:
831 else:
830 util.nouideprecwarn(
832 util.nouideprecwarn(
831 b"do not use `add` outside of update/merge context."
833 b"do not use `add` outside of update/merge context."
832 b" Use `set_tracked`",
834 b" Use `set_tracked`",
833 b'6.0',
835 b'6.0',
834 stacklevel=2,
836 stacklevel=2,
835 )
837 )
836 self._add(f)
838 self._add(f)
837
839
838 def _add(self, filename):
840 def _add(self, filename):
839 """internal function to mark a file as added"""
841 """internal function to mark a file as added"""
840 self._addpath(filename, added=True)
842 self._addpath(filename, added=True)
841 self._map.copymap.pop(filename, None)
843 self._map.copymap.pop(filename, None)
842
844
843 def remove(self, f):
845 def remove(self, f):
844 '''Mark a file removed'''
846 '''Mark a file removed'''
845 if self.pendingparentchange():
847 if self.pendingparentchange():
846 util.nouideprecwarn(
848 util.nouideprecwarn(
847 b"do not use `remove` insde of update/merge context."
849 b"do not use `remove` insde of update/merge context."
848 b" Use `update_file` or `update_file_p1`",
850 b" Use `update_file` or `update_file_p1`",
849 b'6.0',
851 b'6.0',
850 stacklevel=2,
852 stacklevel=2,
851 )
853 )
852 else:
854 else:
853 util.nouideprecwarn(
855 util.nouideprecwarn(
854 b"do not use `remove` outside of update/merge context."
856 b"do not use `remove` outside of update/merge context."
855 b" Use `set_untracked`",
857 b" Use `set_untracked`",
856 b'6.0',
858 b'6.0',
857 stacklevel=2,
859 stacklevel=2,
858 )
860 )
859 self._remove(f)
861 self._remove(f)
860
862
861 def _remove(self, filename):
863 def _remove(self, filename):
862 """internal function to mark a file removed"""
864 """internal function to mark a file removed"""
863 self._dirty = True
865 self._dirty = True
864 self._updatedfiles.add(filename)
866 self._updatedfiles.add(filename)
865 self._map.removefile(filename, in_merge=self.in_merge)
867 self._map.removefile(filename, in_merge=self.in_merge)
866
868
867 def merge(self, f):
869 def merge(self, f):
868 '''Mark a file merged.'''
870 '''Mark a file merged.'''
869 if self.pendingparentchange():
871 if self.pendingparentchange():
870 util.nouideprecwarn(
872 util.nouideprecwarn(
871 b"do not use `merge` inside of update/merge context."
873 b"do not use `merge` inside of update/merge context."
872 b" Use `update_file`",
874 b" Use `update_file`",
873 b'6.0',
875 b'6.0',
874 stacklevel=2,
876 stacklevel=2,
875 )
877 )
876 else:
878 else:
877 util.nouideprecwarn(
879 util.nouideprecwarn(
878 b"do not use `merge` outside of update/merge context."
880 b"do not use `merge` outside of update/merge context."
879 b"It should have been set by the update/merge code",
881 b"It should have been set by the update/merge code",
880 b'6.0',
882 b'6.0',
881 stacklevel=2,
883 stacklevel=2,
882 )
884 )
883 self._merge(f)
885 self._merge(f)
884
886
885 def _merge(self, f):
887 def _merge(self, f):
886 if not self.in_merge:
888 if not self.in_merge:
887 return self._normallookup(f)
889 return self._normallookup(f)
888 return self._otherparent(f)
890 return self._otherparent(f)
889
891
890 def drop(self, f):
892 def drop(self, f):
891 '''Drop a file from the dirstate'''
893 '''Drop a file from the dirstate'''
892 if self.pendingparentchange():
894 if self.pendingparentchange():
893 util.nouideprecwarn(
895 util.nouideprecwarn(
894 b"do not use `drop` inside of update/merge context."
896 b"do not use `drop` inside of update/merge context."
895 b" Use `update_file`",
897 b" Use `update_file`",
896 b'6.0',
898 b'6.0',
897 stacklevel=2,
899 stacklevel=2,
898 )
900 )
899 else:
901 else:
900 util.nouideprecwarn(
902 util.nouideprecwarn(
901 b"do not use `drop` outside of update/merge context."
903 b"do not use `drop` outside of update/merge context."
902 b" Use `set_untracked`",
904 b" Use `set_untracked`",
903 b'6.0',
905 b'6.0',
904 stacklevel=2,
906 stacklevel=2,
905 )
907 )
906 self._drop(f)
908 self._drop(f)
907
909
908 def _drop(self, filename):
910 def _drop(self, filename):
909 """internal function to drop a file from the dirstate"""
911 """internal function to drop a file from the dirstate"""
910 if self._map.dropfile(filename):
912 if self._map.dropfile(filename):
911 self._dirty = True
913 self._dirty = True
912 self._updatedfiles.add(filename)
914 self._updatedfiles.add(filename)
913 self._map.copymap.pop(filename, None)
915 self._map.copymap.pop(filename, None)
914
916
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """discover the canonical on-disk case of `path`

        `normed` is the case-normalized form of `path`.  `exists`, when not
        None, tells whether the path exists and saves the lexists() call.
        Results for existing paths are cached in `storemap` (keyed by
        `normed`).  Returns the case-folded path.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                # normalize the existing leading directories, keep the
                # missing final component as given
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only existing paths get cached; missing ones may appear later
            # with a different case
            storemap[normed] = folded

        return folded
940
942
941 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
943 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
942 normed = util.normcase(path)
944 normed = util.normcase(path)
943 folded = self._map.filefoldmap.get(normed, None)
945 folded = self._map.filefoldmap.get(normed, None)
944 if folded is None:
946 if folded is None:
945 if isknown:
947 if isknown:
946 folded = path
948 folded = path
947 else:
949 else:
948 folded = self._discoverpath(
950 folded = self._discoverpath(
949 path, normed, ignoremissing, exists, self._map.filefoldmap
951 path, normed, ignoremissing, exists, self._map.filefoldmap
950 )
952 )
951 return folded
953 return folded
952
954
953 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
955 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
954 normed = util.normcase(path)
956 normed = util.normcase(path)
955 folded = self._map.filefoldmap.get(normed, None)
957 folded = self._map.filefoldmap.get(normed, None)
956 if folded is None:
958 if folded is None:
957 folded = self._map.dirfoldmap.get(normed, None)
959 folded = self._map.dirfoldmap.get(normed, None)
958 if folded is None:
960 if folded is None:
959 if isknown:
961 if isknown:
960 folded = path
962 folded = path
961 else:
963 else:
962 # store discovered result in dirfoldmap so that future
964 # store discovered result in dirfoldmap so that future
963 # normalizefile calls don't start matching directories
965 # normalizefile calls don't start matching directories
964 folded = self._discoverpath(
966 folded = self._discoverpath(
965 path, normed, ignoremissing, exists, self._map.dirfoldmap
967 path, normed, ignoremissing, exists, self._map.dirfoldmap
966 )
968 )
967 return folded
969 return folded
968
970
969 def normalize(self, path, isknown=False, ignoremissing=False):
971 def normalize(self, path, isknown=False, ignoremissing=False):
970 """
972 """
971 normalize the case of a pathname when on a casefolding filesystem
973 normalize the case of a pathname when on a casefolding filesystem
972
974
973 isknown specifies whether the filename came from walking the
975 isknown specifies whether the filename came from walking the
974 disk, to avoid extra filesystem access.
976 disk, to avoid extra filesystem access.
975
977
976 If ignoremissing is True, missing path are returned
978 If ignoremissing is True, missing path are returned
977 unchanged. Otherwise, we try harder to normalize possibly
979 unchanged. Otherwise, we try harder to normalize possibly
978 existing path components.
980 existing path components.
979
981
980 The normalized case is determined based on the following precedence:
982 The normalized case is determined based on the following precedence:
981
983
982 - version of name already stored in the dirstate
984 - version of name already stored in the dirstate
983 - version of name stored on disk
985 - version of name stored on disk
984 - version provided via command arguments
986 - version provided via command arguments
985 """
987 """
986
988
987 if self._checkcase:
989 if self._checkcase:
988 return self._normalize(path, isknown, ignoremissing)
990 return self._normalize(path, isknown, ignoremissing)
989 return path
991 return path
990
992
991 def clear(self):
993 def clear(self):
992 self._map.clear()
994 self._map.clear()
993 self._lastnormaltime = 0
995 self._lastnormaltime = 0
994 self._updatedfiles.clear()
996 self._updatedfiles.clear()
995 self._dirty = True
997 self._dirty = True
996
998
    def rebuild(self, parent, allfiles, changedfiles=None):
        """rebuild dirstate entries for `allfiles` against revision `parent`

        When `changedfiles` is given, only those files are re-examined:
        ones present in `allfiles` get a fresh lookup entry, the rest are
        dropped.  Otherwise the whole dirstate is wiped and rebuilt.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the wipe
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            # remember the pre-rebuild parents for the write-out callbacks
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self._normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True
1030
1032
1031 def identity(self):
1033 def identity(self):
1032 """Return identity of dirstate itself to detect changing in storage
1034 """Return identity of dirstate itself to detect changing in storage
1033
1035
1034 If identity of previous dirstate is equal to this, writing
1036 If identity of previous dirstate is equal to this, writing
1035 changes based on the former dirstate out can keep consistency.
1037 changes based on the former dirstate out can keep consistency.
1036 """
1038 """
1037 return self._map.identity
1039 return self._map.identity
1038
1040
    def write(self, tr):
        """flush in-memory dirstate changes to disk

        With a transaction `tr`, the write is delayed and registered as a
        file generator on the transaction; without one the dirstate file
        is rewritten immediately.  No-op when nothing is dirty.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )
            return

        # no transaction: write synchronously and atomically
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
1070
1072
1071 def addparentchangecallback(self, category, callback):
1073 def addparentchangecallback(self, category, callback):
1072 """add a callback to be called when the wd parents are changed
1074 """add a callback to be called when the wd parents are changed
1073
1075
1074 Callback will be called with the following arguments:
1076 Callback will be called with the following arguments:
1075 dirstate, (oldp1, oldp2), (newp1, newp2)
1077 dirstate, (oldp1, oldp2), (newp1, newp2)
1076
1078
1077 Category is a unique identifier to allow overwriting an old callback
1079 Category is a unique identifier to allow overwriting an old callback
1078 with a newer callback.
1080 with a newer callback.
1079 """
1081 """
1080 self._plchangecallbacks[category] = callback
1082 self._plchangecallbacks[category] = callback
1081
1083
    def _writedirstate(self, tr, st):
        """serialize the dirstate map into the open file object `st`

        Also fires registered parent-change callbacks and, when
        debug.dirstate.delaywrite is set, sleeps so recorded mtimes cannot
        be ambiguous with the write time.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(tr, st, now)
        # everything flushed: reset the in-memory dirtiness tracking
        self._lastnormaltime = 0
        self._dirty = False
1115
1117
1116 def _dirignore(self, f):
1118 def _dirignore(self, f):
1117 if self._ignore(f):
1119 if self._ignore(f):
1118 return True
1120 return True
1119 for p in pathutil.finddirs(f):
1121 for p in pathutil.finddirs(f):
1120 if self._ignore(p):
1122 if self._ignore(p):
1121 return True
1123 return True
1122 return False
1124 return False
1123
1125
1124 def _ignorefiles(self):
1126 def _ignorefiles(self):
1125 files = []
1127 files = []
1126 if os.path.exists(self._join(b'.hgignore')):
1128 if os.path.exists(self._join(b'.hgignore')):
1127 files.append(self._join(b'.hgignore'))
1129 files.append(self._join(b'.hgignore'))
1128 for name, path in self._ui.configitems(b"ui"):
1130 for name, path in self._ui.configitems(b"ui"):
1129 if name == b'ignore' or name.startswith(b'ignore.'):
1131 if name == b'ignore' or name.startswith(b'ignore.'):
1130 # we need to use os.path.join here rather than self._join
1132 # we need to use os.path.join here rather than self._join
1131 # because path is arbitrary and user-specified
1133 # because path is arbitrary and user-specified
1132 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1134 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1133 return files
1135 return files
1134
1136
1135 def _ignorefileandline(self, f):
1137 def _ignorefileandline(self, f):
1136 files = collections.deque(self._ignorefiles())
1138 files = collections.deque(self._ignorefiles())
1137 visited = set()
1139 visited = set()
1138 while files:
1140 while files:
1139 i = files.popleft()
1141 i = files.popleft()
1140 patterns = matchmod.readpatternfile(
1142 patterns = matchmod.readpatternfile(
1141 i, self._ui.warn, sourceinfo=True
1143 i, self._ui.warn, sourceinfo=True
1142 )
1144 )
1143 for pattern, lineno, line in patterns:
1145 for pattern, lineno, line in patterns:
1144 kind, p = matchmod._patsplit(pattern, b'glob')
1146 kind, p = matchmod._patsplit(pattern, b'glob')
1145 if kind == b"subinclude":
1147 if kind == b"subinclude":
1146 if p not in visited:
1148 if p not in visited:
1147 files.append(p)
1149 files.append(p)
1148 continue
1150 continue
1149 m = matchmod.match(
1151 m = matchmod.match(
1150 self._root, b'', [], [pattern], warn=self._ui.warn
1152 self._root, b'', [], [pattern], warn=self._ui.warn
1151 )
1153 )
1152 if m(f):
1154 if m(f):
1153 return (i, lineno, line)
1155 return (i, lineno, line)
1154 visited.add(i)
1156 visited.add(i)
1155 return (None, -1, b"")
1157 return (None, -1, b"")
1156
1158
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for an unsupported file type
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # hoist frequently used attributes to locals for the loops below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop files that live inside a subrepo; both lists are sorted so a
        # single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # select the ignore predicates for this walk: listing ignored files
        # disables ignoring entirely, listing unknown files uses the real
        # predicates, and listing neither ignores everything (skipping step 2)
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # hoist attribute lookups out of the traversal loops
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # drop the sentinels added by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status via the Rust fast path.

        Returns a ``(lookup, status)`` pair where ``lookup`` is the list of
        files whose state is uncertain (caller must re-check content) and
        ``status`` is a ``scmutil.status`` object.  May raise
        ``rustmod.FallbackError`` (handled by the caller) when the Rust
        implementation cannot service the request.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            # single-thread Rayon when workers are disabled
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust walk may have updated the map in place
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                # a tuple is (file, bad-syntax); a bare item is an unreadable
                # pattern file path
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1559 def status(self, match, subrepos, ignored, clean, unknown):
1561 def status(self, match, subrepos, ignored, clean, unknown):
1560 """Determine the status of the working copy relative to the
1562 """Determine the status of the working copy relative to the
1561 dirstate and return a pair of (unsure, status), where status is of type
1563 dirstate and return a pair of (unsure, status), where status is of type
1562 scmutil.status and:
1564 scmutil.status and:
1563
1565
1564 unsure:
1566 unsure:
1565 files that might have been modified since the dirstate was
1567 files that might have been modified since the dirstate was
1566 written, but need to be read to be sure (size is the same
1568 written, but need to be read to be sure (size is the same
1567 but mtime differs)
1569 but mtime differs)
1568 status.modified:
1570 status.modified:
1569 files that have definitely been modified since the dirstate
1571 files that have definitely been modified since the dirstate
1570 was written (different size or mode)
1572 was written (different size or mode)
1571 status.clean:
1573 status.clean:
1572 files that have definitely not been modified since the
1574 files that have definitely not been modified since the
1573 dirstate was written
1575 dirstate was written
1574 """
1576 """
1575 listignored, listclean, listunknown = ignored, clean, unknown
1577 listignored, listclean, listunknown = ignored, clean, unknown
1576 lookup, modified, added, unknown, ignored = [], [], [], [], []
1578 lookup, modified, added, unknown, ignored = [], [], [], [], []
1577 removed, deleted, clean = [], [], []
1579 removed, deleted, clean = [], [], []
1578
1580
1579 dmap = self._map
1581 dmap = self._map
1580 dmap.preload()
1582 dmap.preload()
1581
1583
1582 use_rust = True
1584 use_rust = True
1583
1585
1584 allowed_matchers = (
1586 allowed_matchers = (
1585 matchmod.alwaysmatcher,
1587 matchmod.alwaysmatcher,
1586 matchmod.exactmatcher,
1588 matchmod.exactmatcher,
1587 matchmod.includematcher,
1589 matchmod.includematcher,
1588 )
1590 )
1589
1591
1590 if rustmod is None:
1592 if rustmod is None:
1591 use_rust = False
1593 use_rust = False
1592 elif self._checkcase:
1594 elif self._checkcase:
1593 # Case-insensitive filesystems are not handled yet
1595 # Case-insensitive filesystems are not handled yet
1594 use_rust = False
1596 use_rust = False
1595 elif subrepos:
1597 elif subrepos:
1596 use_rust = False
1598 use_rust = False
1597 elif sparse.enabled:
1599 elif sparse.enabled:
1598 use_rust = False
1600 use_rust = False
1599 elif not isinstance(match, allowed_matchers):
1601 elif not isinstance(match, allowed_matchers):
1600 # Some matchers have yet to be implemented
1602 # Some matchers have yet to be implemented
1601 use_rust = False
1603 use_rust = False
1602
1604
1603 if use_rust:
1605 if use_rust:
1604 try:
1606 try:
1605 return self._rust_status(
1607 return self._rust_status(
1606 match, listclean, listignored, listunknown
1608 match, listclean, listignored, listunknown
1607 )
1609 )
1608 except rustmod.FallbackError:
1610 except rustmod.FallbackError:
1609 pass
1611 pass
1610
1612
1611 def noop(f):
1613 def noop(f):
1612 pass
1614 pass
1613
1615
1614 dcontains = dmap.__contains__
1616 dcontains = dmap.__contains__
1615 dget = dmap.__getitem__
1617 dget = dmap.__getitem__
1616 ladd = lookup.append # aka "unsure"
1618 ladd = lookup.append # aka "unsure"
1617 madd = modified.append
1619 madd = modified.append
1618 aadd = added.append
1620 aadd = added.append
1619 uadd = unknown.append if listunknown else noop
1621 uadd = unknown.append if listunknown else noop
1620 iadd = ignored.append if listignored else noop
1622 iadd = ignored.append if listignored else noop
1621 radd = removed.append
1623 radd = removed.append
1622 dadd = deleted.append
1624 dadd = deleted.append
1623 cadd = clean.append if listclean else noop
1625 cadd = clean.append if listclean else noop
1624 mexact = match.exact
1626 mexact = match.exact
1625 dirignore = self._dirignore
1627 dirignore = self._dirignore
1626 checkexec = self._checkexec
1628 checkexec = self._checkexec
1627 copymap = self._map.copymap
1629 copymap = self._map.copymap
1628 lastnormaltime = self._lastnormaltime
1630 lastnormaltime = self._lastnormaltime
1629
1631
1630 # We need to do full walks when either
1632 # We need to do full walks when either
1631 # - we're listing all clean files, or
1633 # - we're listing all clean files, or
1632 # - match.traversedir does something, because match.traversedir should
1634 # - match.traversedir does something, because match.traversedir should
1633 # be called for every dir in the working dir
1635 # be called for every dir in the working dir
1634 full = listclean or match.traversedir is not None
1636 full = listclean or match.traversedir is not None
1635 for fn, st in pycompat.iteritems(
1637 for fn, st in pycompat.iteritems(
1636 self.walk(match, subrepos, listunknown, listignored, full=full)
1638 self.walk(match, subrepos, listunknown, listignored, full=full)
1637 ):
1639 ):
1638 if not dcontains(fn):
1640 if not dcontains(fn):
1639 if (listignored or mexact(fn)) and dirignore(fn):
1641 if (listignored or mexact(fn)) and dirignore(fn):
1640 if listignored:
1642 if listignored:
1641 iadd(fn)
1643 iadd(fn)
1642 else:
1644 else:
1643 uadd(fn)
1645 uadd(fn)
1644 continue
1646 continue
1645
1647
1646 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1648 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1647 # written like that for performance reasons. dmap[fn] is not a
1649 # written like that for performance reasons. dmap[fn] is not a
1648 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1650 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1649 # opcode has fast paths when the value to be unpacked is a tuple or
1651 # opcode has fast paths when the value to be unpacked is a tuple or
1650 # a list, but falls back to creating a full-fledged iterator in
1652 # a list, but falls back to creating a full-fledged iterator in
1651 # general. That is much slower than simply accessing and storing the
1653 # general. That is much slower than simply accessing and storing the
1652 # tuple members one by one.
1654 # tuple members one by one.
1653 t = dget(fn)
1655 t = dget(fn)
1654 mode = t.mode
1656 mode = t.mode
1655 size = t.size
1657 size = t.size
1656 time = t.mtime
1658 time = t.mtime
1657
1659
1658 if not st and t.tracked:
1660 if not st and t.tracked:
1659 dadd(fn)
1661 dadd(fn)
1660 elif t.merged:
1662 elif t.merged:
1661 madd(fn)
1663 madd(fn)
1662 elif t.added:
1664 elif t.added:
1663 aadd(fn)
1665 aadd(fn)
1664 elif t.removed:
1666 elif t.removed:
1665 radd(fn)
1667 radd(fn)
1666 elif t.tracked:
1668 elif t.tracked:
1667 if (
1669 if (
1668 size >= 0
1670 size >= 0
1669 and (
1671 and (
1670 (size != st.st_size and size != st.st_size & _rangemask)
1672 (size != st.st_size and size != st.st_size & _rangemask)
1671 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1673 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1672 )
1674 )
1673 or t.from_p2
1675 or t.from_p2
1674 or fn in copymap
1676 or fn in copymap
1675 ):
1677 ):
1676 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1678 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1677 # issue6456: Size returned may be longer due to
1679 # issue6456: Size returned may be longer due to
1678 # encryption on EXT-4 fscrypt, undecided.
1680 # encryption on EXT-4 fscrypt, undecided.
1679 ladd(fn)
1681 ladd(fn)
1680 else:
1682 else:
1681 madd(fn)
1683 madd(fn)
1682 elif (
1684 elif (
1683 time != st[stat.ST_MTIME]
1685 time != st[stat.ST_MTIME]
1684 and time != st[stat.ST_MTIME] & _rangemask
1686 and time != st[stat.ST_MTIME] & _rangemask
1685 ):
1687 ):
1686 ladd(fn)
1688 ladd(fn)
1687 elif st[stat.ST_MTIME] == lastnormaltime:
1689 elif st[stat.ST_MTIME] == lastnormaltime:
1688 # fn may have just been marked as normal and it may have
1690 # fn may have just been marked as normal and it may have
1689 # changed in the same second without changing its size.
1691 # changed in the same second without changing its size.
1690 # This can happen if we quickly do multiple commits.
1692 # This can happen if we quickly do multiple commits.
1691 # Force lookup, so we don't miss such a racy file change.
1693 # Force lookup, so we don't miss such a racy file change.
1692 ladd(fn)
1694 ladd(fn)
1693 elif listclean:
1695 elif listclean:
1694 cadd(fn)
1696 cadd(fn)
1695 status = scmutil.status(
1697 status = scmutil.status(
1696 modified, added, removed, deleted, unknown, ignored, clean
1698 modified, added, removed, deleted, unknown, ignored, clean
1697 )
1699 )
1698 return (lookup, status)
1700 return (lookup, status)
1699
1701
1700 def matches(self, match):
1702 def matches(self, match):
1701 """
1703 """
1702 return files in the dirstate (in whatever state) filtered by match
1704 return files in the dirstate (in whatever state) filtered by match
1703 """
1705 """
1704 dmap = self._map
1706 dmap = self._map
1705 if rustmod is not None:
1707 if rustmod is not None:
1706 dmap = self._map._rustmap
1708 dmap = self._map._rustmap
1707
1709
1708 if match.always():
1710 if match.always():
1709 return dmap.keys()
1711 return dmap.keys()
1710 files = match.files()
1712 files = match.files()
1711 if match.isexact():
1713 if match.isexact():
1712 # fast path -- filter the other way around, since typically files is
1714 # fast path -- filter the other way around, since typically files is
1713 # much smaller than dmap
1715 # much smaller than dmap
1714 return [f for f in files if f in dmap]
1716 return [f for f in files if f in dmap]
1715 if match.prefix() and all(fn in dmap for fn in files):
1717 if match.prefix() and all(fn in dmap for fn in files):
1716 # fast path -- all the values are known to be files, so just return
1718 # fast path -- all the values are known to be files, so just return
1717 # that
1719 # that
1718 return list(files)
1720 return list(files)
1719 return [f for f in dmap if match(f)]
1721 return [f for f in dmap if match(f)]
1720
1722
1721 def _actualfilename(self, tr):
1723 def _actualfilename(self, tr):
1722 if tr:
1724 if tr:
1723 return self._pendingfilename
1725 return self._pendingfilename
1724 else:
1726 else:
1725 return self._filename
1727 return self._filename
1726
1728
1727 def savebackup(self, tr, backupname):
1729 def savebackup(self, tr, backupname):
1728 '''Save current dirstate into backup file'''
1730 '''Save current dirstate into backup file'''
1729 filename = self._actualfilename(tr)
1731 filename = self._actualfilename(tr)
1730 assert backupname != filename
1732 assert backupname != filename
1731
1733
1732 # use '_writedirstate' instead of 'write' to write changes certainly,
1734 # use '_writedirstate' instead of 'write' to write changes certainly,
1733 # because the latter omits writing out if transaction is running.
1735 # because the latter omits writing out if transaction is running.
1734 # output file will be used to create backup of dirstate at this point.
1736 # output file will be used to create backup of dirstate at this point.
1735 if self._dirty or not self._opener.exists(filename):
1737 if self._dirty or not self._opener.exists(filename):
1736 self._writedirstate(
1738 self._writedirstate(
1737 tr,
1739 tr,
1738 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1740 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1739 )
1741 )
1740
1742
1741 if tr:
1743 if tr:
1742 # ensure that subsequent tr.writepending returns True for
1744 # ensure that subsequent tr.writepending returns True for
1743 # changes written out above, even if dirstate is never
1745 # changes written out above, even if dirstate is never
1744 # changed after this
1746 # changed after this
1745 tr.addfilegenerator(
1747 tr.addfilegenerator(
1746 b'dirstate',
1748 b'dirstate',
1747 (self._filename,),
1749 (self._filename,),
1748 lambda f: self._writedirstate(tr, f),
1750 lambda f: self._writedirstate(tr, f),
1749 location=b'plain',
1751 location=b'plain',
1750 )
1752 )
1751
1753
1752 # ensure that pending file written above is unlinked at
1754 # ensure that pending file written above is unlinked at
1753 # failure, even if tr.writepending isn't invoked until the
1755 # failure, even if tr.writepending isn't invoked until the
1754 # end of this transaction
1756 # end of this transaction
1755 tr.registertmp(filename, location=b'plain')
1757 tr.registertmp(filename, location=b'plain')
1756
1758
1757 self._opener.tryunlink(backupname)
1759 self._opener.tryunlink(backupname)
1758 # hardlink backup is okay because _writedirstate is always called
1760 # hardlink backup is okay because _writedirstate is always called
1759 # with an "atomictemp=True" file.
1761 # with an "atomictemp=True" file.
1760 util.copyfile(
1762 util.copyfile(
1761 self._opener.join(filename),
1763 self._opener.join(filename),
1762 self._opener.join(backupname),
1764 self._opener.join(backupname),
1763 hardlink=True,
1765 hardlink=True,
1764 )
1766 )
1765
1767
1766 def restorebackup(self, tr, backupname):
1768 def restorebackup(self, tr, backupname):
1767 '''Restore dirstate by backup file'''
1769 '''Restore dirstate by backup file'''
1768 # this "invalidate()" prevents "wlock.release()" from writing
1770 # this "invalidate()" prevents "wlock.release()" from writing
1769 # changes of dirstate out after restoring from backup file
1771 # changes of dirstate out after restoring from backup file
1770 self.invalidate()
1772 self.invalidate()
1771 filename = self._actualfilename(tr)
1773 filename = self._actualfilename(tr)
1772 o = self._opener
1774 o = self._opener
1773 if util.samefile(o.join(backupname), o.join(filename)):
1775 if util.samefile(o.join(backupname), o.join(filename)):
1774 o.unlink(backupname)
1776 o.unlink(backupname)
1775 else:
1777 else:
1776 o.rename(backupname, filename, checkambig=True)
1778 o.rename(backupname, filename, checkambig=True)
1777
1779
    def clearbackup(self, tr, backupname):
        '''Clear backup file

        ``tr`` is accepted for interface symmetry with savebackup() and
        restorebackup() but is not used here.
        '''
        self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now