##// END OF EJS Templates
dirstate: fix typo in docstring...
Augie Fackler -
r48570:03089463 stable
parent child Browse files
Show More
@@ -1,1780 +1,1780 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # cache keys live under the repository's .hg/ directory
        path = obj._opener.join(fname)
        return path


class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # cache keys live directly under the working-directory root
        path = obj._join(fname)
        return path
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator enforcing that a method runs inside a parentchange context.

    Raises ProgrammingError when no parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap


def requires_no_parents_change(func):
    """Decorator enforcing that a method runs outside a parentchange context.

    Raises ProgrammingError when a parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True while the in-memory state has unwritten changes
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # nesting depth of active parentchange() contexts (see parentchange())
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        # parents as they were before the current change series; set lazily
        # by setparents(), cleared by invalidate()
        self._origpl = None
        # filenames touched since the last write, used when flushing state
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139
139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching the property is enough to force the parents to load
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0
169
169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # assigning the attribute replaces this propertycache, so later
        # accesses are plain attribute lookups
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194
194
    @repocache(b'branch')
    def _branch(self):
        # the branch name lives in .hg/branch; a missing or empty file
        # means the default branch
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b"default"

    @property
    def _pl(self):
        # (p1, p2) pair of working-directory parents, straight from the map
        return self._map.parents()

    def hasdir(self, d):
        # True when `d` is a directory that contains tracked entries
        return self._map.hastrackeddir(d)

    @rootcache(b'.hgignore')
    def _ignore(self):
        """Build the matcher for ignored files from all the ignore files."""
        files = self._ignorefiles()
        if not files:
            # nothing configured: a matcher that never matches
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
    @propertycache
    def _slash(self):
        # True when ui.slash is set and the native separator is not '/',
        # i.e. paths should be displayed with forward slashes
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # whether the repository's filesystem supports symlinks
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # whether the repository's filesystem supports the exec bit
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed on the .hg directory)
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240
240
    def flagfunc(self, buildfallback):
        """Return a function mapping a tracked path to its flags:
        b'l' (symlink), b'x' (executable) or b'' (neither).

        When the filesystem cannot represent one of the flags, the
        function returned by ``buildfallback()`` supplies it instead.
        """
        if self._checklink and self._checkexec:
            # best case: the filesystem is authoritative for both flags

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    # missing/unreadable file: report no flags
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks are real, but the exec bit must come from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # the exec bit is real, but symlinks must come from the fallback

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither flag is representable: rely entirely on the fallback
            return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
    def parents(self):
        """Return both working-directory parents, validated."""
        return [self._validate(p) for p in self._pl]

    def p1(self):
        """Return the first working-directory parent, validated."""
        return self._validate(self._pl[0])

    def p2(self):
        """Return the second working-directory parent, validated."""
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # a merge is in progress exactly when p2 is set to a real node
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        """Return the current branch name, converted to the local encoding."""
        return encoding.tolocal(self._branch)
366
366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents (only once per change series)
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # leaving a merge: clean up merge/other-parent entries
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414
414
    def setbranch(self, branch):
        """Record `branch` (local encoding) as the current branch.

        The name is written to .hg/branch via an atomic temp file; on any
        failure the partial write is discarded and the error re-raised.
        """
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise
430
430
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop cached properties so they are recomputed on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        # reset all transient in-memory state
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True if the file was previously untracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            # unknown to the dirstate: plain addition
            self._add(filename)
            return True
        elif not entry.tracked:
            # present in the map but not tracked: re-track it
            self._normallookup(filename)
            return True
        # XXX This is probably overkill for most cases, but we need this to
        # fully replace the `normallookup` call with `set_tracked` one.
        # Consider smoothing this in the future.
        self.set_possibly_dirty(filename)
        return False

    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True if the file was previously tracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            # nothing to do for a file the dirstate never heard of
            return False
        elif entry.added:
            # never in any parent: dropping the entry forgets it entirely
            self._drop(filename)
            return True
        else:
            # otherwise mark it as removed
            self._remove(filename)
            return True
505
505
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        # the entry changes, so the dirstate must be rewritten
        self._dirty = True
        self._updatedfiles.add(filename)
        self._normal(filename, parentfiledata=parentfiledata)

    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        # the entry changes, so the dirstate must be rewritten
        self._dirty = True
        self._updatedfiles.add(filename)
        self._map.set_possibly_dirty(filename)
519
519
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            pass
        else:
            assert False, 'unreachable'

        # This means we may be called for a file whose data we do not really
        # care about (e.g. added or removed); however this should be a minor
        # overhead compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
581
581
582 @requires_parents_change
582 @requires_parents_change
583 def update_file(
583 def update_file(
584 self,
584 self,
585 filename,
585 filename,
586 wc_tracked,
586 wc_tracked,
587 p1_tracked,
587 p1_tracked,
588 p2_tracked=False,
588 p2_tracked=False,
589 merged=False,
589 merged=False,
590 clean_p1=False,
590 clean_p1=False,
591 clean_p2=False,
591 clean_p2=False,
592 possibly_dirty=False,
592 possibly_dirty=False,
593 parentfiledata=None,
593 parentfiledata=None,
594 ):
594 ):
595 """update the information about a file in the dirstate
595 """update the information about a file in the dirstate
596
596
597 This is to be called when the direstates parent changes to keep track
597 This is to be called when the direstates parent changes to keep track
598 of what is the file situation in regards to the working copy and its parent.
598 of what is the file situation in regards to the working copy and its parent.
599
599
600 This function must be called within a `dirstate.parentchange` context.
600 This function must be called within a `dirstate.parentchange` context.
601
601
602 note: the API is at an early stage and we might need to ajust it
602 note: the API is at an early stage and we might need to adjust it
603 depending of what information ends up being relevant and useful to
603 depending of what information ends up being relevant and useful to
604 other processing.
604 other processing.
605 """
605 """
606 if merged and (clean_p1 or clean_p2):
606 if merged and (clean_p1 or clean_p2):
607 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
607 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
608 raise error.ProgrammingError(msg)
608 raise error.ProgrammingError(msg)
609
609
610 # note: I do not think we need to double check name clash here since we
610 # note: I do not think we need to double check name clash here since we
611 # are in a update/merge case that should already have taken care of
611 # are in a update/merge case that should already have taken care of
612 # this. The test agrees
612 # this. The test agrees
613
613
614 self._dirty = True
614 self._dirty = True
615 self._updatedfiles.add(filename)
615 self._updatedfiles.add(filename)
616
616
617 need_parent_file_data = (
617 need_parent_file_data = (
618 not (possibly_dirty or clean_p2 or merged)
618 not (possibly_dirty or clean_p2 or merged)
619 and wc_tracked
619 and wc_tracked
620 and p1_tracked
620 and p1_tracked
621 )
621 )
622
622
623 # this mean we are doing call for file we do not really care about the
623 # this mean we are doing call for file we do not really care about the
624 # data (eg: added or removed), however this should be a minor overhead
624 # data (eg: added or removed), however this should be a minor overhead
625 # compared to the overall update process calling this.
625 # compared to the overall update process calling this.
626 if need_parent_file_data:
626 if need_parent_file_data:
627 if parentfiledata is None:
627 if parentfiledata is None:
628 parentfiledata = self._get_filedata(filename)
628 parentfiledata = self._get_filedata(filename)
629 mtime = parentfiledata[2]
629 mtime = parentfiledata[2]
630
630
631 if mtime > self._lastnormaltime:
631 if mtime > self._lastnormaltime:
632 # Remember the most recent modification timeslot for
632 # Remember the most recent modification timeslot for
633 # status(), to make sure we won't miss future
633 # status(), to make sure we won't miss future
634 # size-preserving file content modifications that happen
634 # size-preserving file content modifications that happen
635 # within the same timeslot.
635 # within the same timeslot.
636 self._lastnormaltime = mtime
636 self._lastnormaltime = mtime
637
637
638 self._map.reset_state(
638 self._map.reset_state(
639 filename,
639 filename,
640 wc_tracked,
640 wc_tracked,
641 p1_tracked,
641 p1_tracked,
642 p2_tracked=p2_tracked,
642 p2_tracked=p2_tracked,
643 merged=merged,
643 merged=merged,
644 clean_p1=clean_p1,
644 clean_p1=clean_p1,
645 clean_p2=clean_p2,
645 clean_p2=clean_p2,
646 possibly_dirty=possibly_dirty,
646 possibly_dirty=possibly_dirty,
647 parentfiledata=parentfiledata,
647 parentfiledata=parentfiledata,
648 )
648 )
649 if (
649 if (
650 parentfiledata is not None
650 parentfiledata is not None
651 and parentfiledata[2] > self._lastnormaltime
651 and parentfiledata[2] > self._lastnormaltime
652 ):
652 ):
653 # Remember the most recent modification timeslot for status(),
653 # Remember the most recent modification timeslot for status(),
654 # to make sure we won't miss future size-preserving file content
654 # to make sure we won't miss future size-preserving file content
655 # modifications that happen within the same timeslot.
655 # modifications that happen within the same timeslot.
656 self._lastnormaltime = parentfiledata[2]
656 self._lastnormaltime = parentfiledata[2]
657
657
658 def _addpath(
658 def _addpath(
659 self,
659 self,
660 f,
660 f,
661 mode=0,
661 mode=0,
662 size=None,
662 size=None,
663 mtime=None,
663 mtime=None,
664 added=False,
664 added=False,
665 merged=False,
665 merged=False,
666 from_p2=False,
666 from_p2=False,
667 possibly_dirty=False,
667 possibly_dirty=False,
668 ):
668 ):
669 entry = self._map.get(f)
669 entry = self._map.get(f)
670 if added or entry is not None and entry.removed:
670 if added or entry is not None and entry.removed:
671 scmutil.checkfilename(f)
671 scmutil.checkfilename(f)
672 if self._map.hastrackeddir(f):
672 if self._map.hastrackeddir(f):
673 msg = _(b'directory %r already in dirstate')
673 msg = _(b'directory %r already in dirstate')
674 msg %= pycompat.bytestr(f)
674 msg %= pycompat.bytestr(f)
675 raise error.Abort(msg)
675 raise error.Abort(msg)
676 # shadows
676 # shadows
677 for d in pathutil.finddirs(f):
677 for d in pathutil.finddirs(f):
678 if self._map.hastrackeddir(d):
678 if self._map.hastrackeddir(d):
679 break
679 break
680 entry = self._map.get(d)
680 entry = self._map.get(d)
681 if entry is not None and not entry.removed:
681 if entry is not None and not entry.removed:
682 msg = _(b'file %r in dirstate clashes with %r')
682 msg = _(b'file %r in dirstate clashes with %r')
683 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
683 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
684 raise error.Abort(msg)
684 raise error.Abort(msg)
685 self._dirty = True
685 self._dirty = True
686 self._updatedfiles.add(f)
686 self._updatedfiles.add(f)
687 self._map.addfile(
687 self._map.addfile(
688 f,
688 f,
689 mode=mode,
689 mode=mode,
690 size=size,
690 size=size,
691 mtime=mtime,
691 mtime=mtime,
692 added=added,
692 added=added,
693 merged=merged,
693 merged=merged,
694 from_p2=from_p2,
694 from_p2=from_p2,
695 possibly_dirty=possibly_dirty,
695 possibly_dirty=possibly_dirty,
696 )
696 )
697
697
698 def _get_filedata(self, filename):
698 def _get_filedata(self, filename):
699 """returns"""
699 """returns"""
700 s = os.lstat(self._join(filename))
700 s = os.lstat(self._join(filename))
701 mode = s.st_mode
701 mode = s.st_mode
702 size = s.st_size
702 size = s.st_size
703 mtime = s[stat.ST_MTIME]
703 mtime = s[stat.ST_MTIME]
704 return (mode, size, mtime)
704 return (mode, size, mtime)
705
705
706 def normal(self, f, parentfiledata=None):
706 def normal(self, f, parentfiledata=None):
707 """Mark a file normal and clean.
707 """Mark a file normal and clean.
708
708
709 parentfiledata: (mode, size, mtime) of the clean file
709 parentfiledata: (mode, size, mtime) of the clean file
710
710
711 parentfiledata should be computed from memory (for mode,
711 parentfiledata should be computed from memory (for mode,
712 size), as or close as possible from the point where we
712 size), as or close as possible from the point where we
713 determined the file was clean, to limit the risk of the
713 determined the file was clean, to limit the risk of the
714 file having been changed by an external process between the
714 file having been changed by an external process between the
715 moment where the file was determined to be clean and now."""
715 moment where the file was determined to be clean and now."""
716 if self.pendingparentchange():
716 if self.pendingparentchange():
717 util.nouideprecwarn(
717 util.nouideprecwarn(
718 b"do not use `normal` inside of update/merge context."
718 b"do not use `normal` inside of update/merge context."
719 b" Use `update_file` or `update_file_p1`",
719 b" Use `update_file` or `update_file_p1`",
720 b'6.0',
720 b'6.0',
721 stacklevel=2,
721 stacklevel=2,
722 )
722 )
723 else:
723 else:
724 util.nouideprecwarn(
724 util.nouideprecwarn(
725 b"do not use `normal` outside of update/merge context."
725 b"do not use `normal` outside of update/merge context."
726 b" Use `set_tracked`",
726 b" Use `set_tracked`",
727 b'6.0',
727 b'6.0',
728 stacklevel=2,
728 stacklevel=2,
729 )
729 )
730 self._normal(f, parentfiledata=parentfiledata)
730 self._normal(f, parentfiledata=parentfiledata)
731
731
732 def _normal(self, f, parentfiledata=None):
732 def _normal(self, f, parentfiledata=None):
733 if parentfiledata:
733 if parentfiledata:
734 (mode, size, mtime) = parentfiledata
734 (mode, size, mtime) = parentfiledata
735 else:
735 else:
736 (mode, size, mtime) = self._get_filedata(f)
736 (mode, size, mtime) = self._get_filedata(f)
737 self._addpath(f, mode=mode, size=size, mtime=mtime)
737 self._addpath(f, mode=mode, size=size, mtime=mtime)
738 self._map.copymap.pop(f, None)
738 self._map.copymap.pop(f, None)
739 if f in self._map.nonnormalset:
739 if f in self._map.nonnormalset:
740 self._map.nonnormalset.remove(f)
740 self._map.nonnormalset.remove(f)
741 if mtime > self._lastnormaltime:
741 if mtime > self._lastnormaltime:
742 # Remember the most recent modification timeslot for status(),
742 # Remember the most recent modification timeslot for status(),
743 # to make sure we won't miss future size-preserving file content
743 # to make sure we won't miss future size-preserving file content
744 # modifications that happen within the same timeslot.
744 # modifications that happen within the same timeslot.
745 self._lastnormaltime = mtime
745 self._lastnormaltime = mtime
746
746
747 def normallookup(self, f):
747 def normallookup(self, f):
748 '''Mark a file normal, but possibly dirty.'''
748 '''Mark a file normal, but possibly dirty.'''
749 if self.pendingparentchange():
749 if self.pendingparentchange():
750 util.nouideprecwarn(
750 util.nouideprecwarn(
751 b"do not use `normallookup` inside of update/merge context."
751 b"do not use `normallookup` inside of update/merge context."
752 b" Use `update_file` or `update_file_p1`",
752 b" Use `update_file` or `update_file_p1`",
753 b'6.0',
753 b'6.0',
754 stacklevel=2,
754 stacklevel=2,
755 )
755 )
756 else:
756 else:
757 util.nouideprecwarn(
757 util.nouideprecwarn(
758 b"do not use `normallookup` outside of update/merge context."
758 b"do not use `normallookup` outside of update/merge context."
759 b" Use `set_possibly_dirty` or `set_tracked`",
759 b" Use `set_possibly_dirty` or `set_tracked`",
760 b'6.0',
760 b'6.0',
761 stacklevel=2,
761 stacklevel=2,
762 )
762 )
763 self._normallookup(f)
763 self._normallookup(f)
764
764
765 def _normallookup(self, f):
765 def _normallookup(self, f):
766 '''Mark a file normal, but possibly dirty.'''
766 '''Mark a file normal, but possibly dirty.'''
767 if self.in_merge:
767 if self.in_merge:
768 # if there is a merge going on and the file was either
768 # if there is a merge going on and the file was either
769 # "merged" or coming from other parent (-2) before
769 # "merged" or coming from other parent (-2) before
770 # being removed, restore that state.
770 # being removed, restore that state.
771 entry = self._map.get(f)
771 entry = self._map.get(f)
772 if entry is not None:
772 if entry is not None:
773 # XXX this should probably be dealt with a a lower level
773 # XXX this should probably be dealt with a a lower level
774 # (see `merged_removed` and `from_p2_removed`)
774 # (see `merged_removed` and `from_p2_removed`)
775 if entry.merged_removed or entry.from_p2_removed:
775 if entry.merged_removed or entry.from_p2_removed:
776 source = self._map.copymap.get(f)
776 source = self._map.copymap.get(f)
777 if entry.merged_removed:
777 if entry.merged_removed:
778 self._merge(f)
778 self._merge(f)
779 elif entry.from_p2_removed:
779 elif entry.from_p2_removed:
780 self._otherparent(f)
780 self._otherparent(f)
781 if source is not None:
781 if source is not None:
782 self.copy(source, f)
782 self.copy(source, f)
783 return
783 return
784 elif entry.merged or entry.from_p2:
784 elif entry.merged or entry.from_p2:
785 return
785 return
786 self._addpath(f, possibly_dirty=True)
786 self._addpath(f, possibly_dirty=True)
787 self._map.copymap.pop(f, None)
787 self._map.copymap.pop(f, None)
788
788
789 def otherparent(self, f):
789 def otherparent(self, f):
790 '''Mark as coming from the other parent, always dirty.'''
790 '''Mark as coming from the other parent, always dirty.'''
791 if self.pendingparentchange():
791 if self.pendingparentchange():
792 util.nouideprecwarn(
792 util.nouideprecwarn(
793 b"do not use `otherparent` inside of update/merge context."
793 b"do not use `otherparent` inside of update/merge context."
794 b" Use `update_file` or `update_file_p1`",
794 b" Use `update_file` or `update_file_p1`",
795 b'6.0',
795 b'6.0',
796 stacklevel=2,
796 stacklevel=2,
797 )
797 )
798 else:
798 else:
799 util.nouideprecwarn(
799 util.nouideprecwarn(
800 b"do not use `otherparent` outside of update/merge context."
800 b"do not use `otherparent` outside of update/merge context."
801 b"It should have been set by the update/merge code",
801 b"It should have been set by the update/merge code",
802 b'6.0',
802 b'6.0',
803 stacklevel=2,
803 stacklevel=2,
804 )
804 )
805 self._otherparent(f)
805 self._otherparent(f)
806
806
807 def _otherparent(self, f):
807 def _otherparent(self, f):
808 if not self.in_merge:
808 if not self.in_merge:
809 msg = _(b"setting %r to other parent only allowed in merges") % f
809 msg = _(b"setting %r to other parent only allowed in merges") % f
810 raise error.Abort(msg)
810 raise error.Abort(msg)
811 entry = self._map.get(f)
811 entry = self._map.get(f)
812 if entry is not None and entry.tracked:
812 if entry is not None and entry.tracked:
813 # merge-like
813 # merge-like
814 self._addpath(f, merged=True)
814 self._addpath(f, merged=True)
815 else:
815 else:
816 # add-like
816 # add-like
817 self._addpath(f, from_p2=True)
817 self._addpath(f, from_p2=True)
818 self._map.copymap.pop(f, None)
818 self._map.copymap.pop(f, None)
819
819
820 def add(self, f):
820 def add(self, f):
821 '''Mark a file added.'''
821 '''Mark a file added.'''
822 if self.pendingparentchange():
822 if self.pendingparentchange():
823 util.nouideprecwarn(
823 util.nouideprecwarn(
824 b"do not use `add` inside of update/merge context."
824 b"do not use `add` inside of update/merge context."
825 b" Use `update_file`",
825 b" Use `update_file`",
826 b'6.0',
826 b'6.0',
827 stacklevel=2,
827 stacklevel=2,
828 )
828 )
829 else:
829 else:
830 util.nouideprecwarn(
830 util.nouideprecwarn(
831 b"do not use `remove` outside of update/merge context."
831 b"do not use `remove` outside of update/merge context."
832 b" Use `set_tracked`",
832 b" Use `set_tracked`",
833 b'6.0',
833 b'6.0',
834 stacklevel=2,
834 stacklevel=2,
835 )
835 )
836 self._add(f)
836 self._add(f)
837
837
838 def _add(self, filename):
838 def _add(self, filename):
839 """internal function to mark a file as added"""
839 """internal function to mark a file as added"""
840 self._addpath(filename, added=True)
840 self._addpath(filename, added=True)
841 self._map.copymap.pop(filename, None)
841 self._map.copymap.pop(filename, None)
842
842
843 def remove(self, f):
843 def remove(self, f):
844 '''Mark a file removed'''
844 '''Mark a file removed'''
845 if self.pendingparentchange():
845 if self.pendingparentchange():
846 util.nouideprecwarn(
846 util.nouideprecwarn(
847 b"do not use `remove` insde of update/merge context."
847 b"do not use `remove` insde of update/merge context."
848 b" Use `update_file` or `update_file_p1`",
848 b" Use `update_file` or `update_file_p1`",
849 b'6.0',
849 b'6.0',
850 stacklevel=2,
850 stacklevel=2,
851 )
851 )
852 else:
852 else:
853 util.nouideprecwarn(
853 util.nouideprecwarn(
854 b"do not use `remove` outside of update/merge context."
854 b"do not use `remove` outside of update/merge context."
855 b" Use `set_untracked`",
855 b" Use `set_untracked`",
856 b'6.0',
856 b'6.0',
857 stacklevel=2,
857 stacklevel=2,
858 )
858 )
859 self._remove(f)
859 self._remove(f)
860
860
861 def _remove(self, filename):
861 def _remove(self, filename):
862 """internal function to mark a file removed"""
862 """internal function to mark a file removed"""
863 self._dirty = True
863 self._dirty = True
864 self._updatedfiles.add(filename)
864 self._updatedfiles.add(filename)
865 self._map.removefile(filename, in_merge=self.in_merge)
865 self._map.removefile(filename, in_merge=self.in_merge)
866
866
867 def merge(self, f):
867 def merge(self, f):
868 '''Mark a file merged.'''
868 '''Mark a file merged.'''
869 if self.pendingparentchange():
869 if self.pendingparentchange():
870 util.nouideprecwarn(
870 util.nouideprecwarn(
871 b"do not use `merge` inside of update/merge context."
871 b"do not use `merge` inside of update/merge context."
872 b" Use `update_file`",
872 b" Use `update_file`",
873 b'6.0',
873 b'6.0',
874 stacklevel=2,
874 stacklevel=2,
875 )
875 )
876 else:
876 else:
877 util.nouideprecwarn(
877 util.nouideprecwarn(
878 b"do not use `merge` outside of update/merge context."
878 b"do not use `merge` outside of update/merge context."
879 b"It should have been set by the update/merge code",
879 b"It should have been set by the update/merge code",
880 b'6.0',
880 b'6.0',
881 stacklevel=2,
881 stacklevel=2,
882 )
882 )
883 self._merge(f)
883 self._merge(f)
884
884
885 def _merge(self, f):
885 def _merge(self, f):
886 if not self.in_merge:
886 if not self.in_merge:
887 return self._normallookup(f)
887 return self._normallookup(f)
888 return self._otherparent(f)
888 return self._otherparent(f)
889
889
890 def drop(self, f):
890 def drop(self, f):
891 '''Drop a file from the dirstate'''
891 '''Drop a file from the dirstate'''
892 if self.pendingparentchange():
892 if self.pendingparentchange():
893 util.nouideprecwarn(
893 util.nouideprecwarn(
894 b"do not use `drop` inside of update/merge context."
894 b"do not use `drop` inside of update/merge context."
895 b" Use `update_file`",
895 b" Use `update_file`",
896 b'6.0',
896 b'6.0',
897 stacklevel=2,
897 stacklevel=2,
898 )
898 )
899 else:
899 else:
900 util.nouideprecwarn(
900 util.nouideprecwarn(
901 b"do not use `drop` outside of update/merge context."
901 b"do not use `drop` outside of update/merge context."
902 b" Use `set_untracked`",
902 b" Use `set_untracked`",
903 b'6.0',
903 b'6.0',
904 stacklevel=2,
904 stacklevel=2,
905 )
905 )
906 self._drop(f)
906 self._drop(f)
907
907
908 def _drop(self, filename):
908 def _drop(self, filename):
909 """internal function to drop a file from the dirstate"""
909 """internal function to drop a file from the dirstate"""
910 if self._map.dropfile(filename):
910 if self._map.dropfile(filename):
911 self._dirty = True
911 self._dirty = True
912 self._updatedfiles.add(filename)
912 self._updatedfiles.add(filename)
913 self._map.copymap.pop(filename, None)
913 self._map.copymap.pop(filename, None)
914
914
915 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
915 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
916 if exists is None:
916 if exists is None:
917 exists = os.path.lexists(os.path.join(self._root, path))
917 exists = os.path.lexists(os.path.join(self._root, path))
918 if not exists:
918 if not exists:
919 # Maybe a path component exists
919 # Maybe a path component exists
920 if not ignoremissing and b'/' in path:
920 if not ignoremissing and b'/' in path:
921 d, f = path.rsplit(b'/', 1)
921 d, f = path.rsplit(b'/', 1)
922 d = self._normalize(d, False, ignoremissing, None)
922 d = self._normalize(d, False, ignoremissing, None)
923 folded = d + b"/" + f
923 folded = d + b"/" + f
924 else:
924 else:
925 # No path components, preserve original case
925 # No path components, preserve original case
926 folded = path
926 folded = path
927 else:
927 else:
928 # recursively normalize leading directory components
928 # recursively normalize leading directory components
929 # against dirstate
929 # against dirstate
930 if b'/' in normed:
930 if b'/' in normed:
931 d, f = normed.rsplit(b'/', 1)
931 d, f = normed.rsplit(b'/', 1)
932 d = self._normalize(d, False, ignoremissing, True)
932 d = self._normalize(d, False, ignoremissing, True)
933 r = self._root + b"/" + d
933 r = self._root + b"/" + d
934 folded = d + b"/" + util.fspath(f, r)
934 folded = d + b"/" + util.fspath(f, r)
935 else:
935 else:
936 folded = util.fspath(normed, self._root)
936 folded = util.fspath(normed, self._root)
937 storemap[normed] = folded
937 storemap[normed] = folded
938
938
939 return folded
939 return folded
940
940
941 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
941 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
942 normed = util.normcase(path)
942 normed = util.normcase(path)
943 folded = self._map.filefoldmap.get(normed, None)
943 folded = self._map.filefoldmap.get(normed, None)
944 if folded is None:
944 if folded is None:
945 if isknown:
945 if isknown:
946 folded = path
946 folded = path
947 else:
947 else:
948 folded = self._discoverpath(
948 folded = self._discoverpath(
949 path, normed, ignoremissing, exists, self._map.filefoldmap
949 path, normed, ignoremissing, exists, self._map.filefoldmap
950 )
950 )
951 return folded
951 return folded
952
952
953 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
953 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
954 normed = util.normcase(path)
954 normed = util.normcase(path)
955 folded = self._map.filefoldmap.get(normed, None)
955 folded = self._map.filefoldmap.get(normed, None)
956 if folded is None:
956 if folded is None:
957 folded = self._map.dirfoldmap.get(normed, None)
957 folded = self._map.dirfoldmap.get(normed, None)
958 if folded is None:
958 if folded is None:
959 if isknown:
959 if isknown:
960 folded = path
960 folded = path
961 else:
961 else:
962 # store discovered result in dirfoldmap so that future
962 # store discovered result in dirfoldmap so that future
963 # normalizefile calls don't start matching directories
963 # normalizefile calls don't start matching directories
964 folded = self._discoverpath(
964 folded = self._discoverpath(
965 path, normed, ignoremissing, exists, self._map.dirfoldmap
965 path, normed, ignoremissing, exists, self._map.dirfoldmap
966 )
966 )
967 return folded
967 return folded
968
968
969 def normalize(self, path, isknown=False, ignoremissing=False):
969 def normalize(self, path, isknown=False, ignoremissing=False):
970 """
970 """
971 normalize the case of a pathname when on a casefolding filesystem
971 normalize the case of a pathname when on a casefolding filesystem
972
972
973 isknown specifies whether the filename came from walking the
973 isknown specifies whether the filename came from walking the
974 disk, to avoid extra filesystem access.
974 disk, to avoid extra filesystem access.
975
975
976 If ignoremissing is True, missing path are returned
976 If ignoremissing is True, missing path are returned
977 unchanged. Otherwise, we try harder to normalize possibly
977 unchanged. Otherwise, we try harder to normalize possibly
978 existing path components.
978 existing path components.
979
979
980 The normalized case is determined based on the following precedence:
980 The normalized case is determined based on the following precedence:
981
981
982 - version of name already stored in the dirstate
982 - version of name already stored in the dirstate
983 - version of name stored on disk
983 - version of name stored on disk
984 - version provided via command arguments
984 - version provided via command arguments
985 """
985 """
986
986
987 if self._checkcase:
987 if self._checkcase:
988 return self._normalize(path, isknown, ignoremissing)
988 return self._normalize(path, isknown, ignoremissing)
989 return path
989 return path
990
990
991 def clear(self):
991 def clear(self):
992 self._map.clear()
992 self._map.clear()
993 self._lastnormaltime = 0
993 self._lastnormaltime = 0
994 self._updatedfiles.clear()
994 self._updatedfiles.clear()
995 self._dirty = True
995 self._dirty = True
996
996
997 def rebuild(self, parent, allfiles, changedfiles=None):
997 def rebuild(self, parent, allfiles, changedfiles=None):
998 if changedfiles is None:
998 if changedfiles is None:
999 # Rebuild entire dirstate
999 # Rebuild entire dirstate
1000 to_lookup = allfiles
1000 to_lookup = allfiles
1001 to_drop = []
1001 to_drop = []
1002 lastnormaltime = self._lastnormaltime
1002 lastnormaltime = self._lastnormaltime
1003 self.clear()
1003 self.clear()
1004 self._lastnormaltime = lastnormaltime
1004 self._lastnormaltime = lastnormaltime
1005 elif len(changedfiles) < 10:
1005 elif len(changedfiles) < 10:
1006 # Avoid turning allfiles into a set, which can be expensive if it's
1006 # Avoid turning allfiles into a set, which can be expensive if it's
1007 # large.
1007 # large.
1008 to_lookup = []
1008 to_lookup = []
1009 to_drop = []
1009 to_drop = []
1010 for f in changedfiles:
1010 for f in changedfiles:
1011 if f in allfiles:
1011 if f in allfiles:
1012 to_lookup.append(f)
1012 to_lookup.append(f)
1013 else:
1013 else:
1014 to_drop.append(f)
1014 to_drop.append(f)
1015 else:
1015 else:
1016 changedfilesset = set(changedfiles)
1016 changedfilesset = set(changedfiles)
1017 to_lookup = changedfilesset & set(allfiles)
1017 to_lookup = changedfilesset & set(allfiles)
1018 to_drop = changedfilesset - to_lookup
1018 to_drop = changedfilesset - to_lookup
1019
1019
1020 if self._origpl is None:
1020 if self._origpl is None:
1021 self._origpl = self._pl
1021 self._origpl = self._pl
1022 self._map.setparents(parent, self._nodeconstants.nullid)
1022 self._map.setparents(parent, self._nodeconstants.nullid)
1023
1023
1024 for f in to_lookup:
1024 for f in to_lookup:
1025 self._normallookup(f)
1025 self._normallookup(f)
1026 for f in to_drop:
1026 for f in to_drop:
1027 self._drop(f)
1027 self._drop(f)
1028
1028
1029 self._dirty = True
1029 self._dirty = True
1030
1030
1031 def identity(self):
1031 def identity(self):
1032 """Return identity of dirstate itself to detect changing in storage
1032 """Return identity of dirstate itself to detect changing in storage
1033
1033
1034 If identity of previous dirstate is equal to this, writing
1034 If identity of previous dirstate is equal to this, writing
1035 changes based on the former dirstate out can keep consistency.
1035 changes based on the former dirstate out can keep consistency.
1036 """
1036 """
1037 return self._map.identity
1037 return self._map.identity
1038
1038
1039 def write(self, tr):
1039 def write(self, tr):
1040 if not self._dirty:
1040 if not self._dirty:
1041 return
1041 return
1042
1042
1043 filename = self._filename
1043 filename = self._filename
1044 if tr:
1044 if tr:
1045 # 'dirstate.write()' is not only for writing in-memory
1045 # 'dirstate.write()' is not only for writing in-memory
1046 # changes out, but also for dropping ambiguous timestamp.
1046 # changes out, but also for dropping ambiguous timestamp.
1047 # delayed writing re-raise "ambiguous timestamp issue".
1047 # delayed writing re-raise "ambiguous timestamp issue".
1048 # See also the wiki page below for detail:
1048 # See also the wiki page below for detail:
1049 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
1049 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
1050
1050
1051 # emulate dropping timestamp in 'parsers.pack_dirstate'
1051 # emulate dropping timestamp in 'parsers.pack_dirstate'
1052 now = _getfsnow(self._opener)
1052 now = _getfsnow(self._opener)
1053 self._map.clearambiguoustimes(self._updatedfiles, now)
1053 self._map.clearambiguoustimes(self._updatedfiles, now)
1054
1054
1055 # emulate that all 'dirstate.normal' results are written out
1055 # emulate that all 'dirstate.normal' results are written out
1056 self._lastnormaltime = 0
1056 self._lastnormaltime = 0
1057 self._updatedfiles.clear()
1057 self._updatedfiles.clear()
1058
1058
1059 # delay writing in-memory changes out
1059 # delay writing in-memory changes out
1060 tr.addfilegenerator(
1060 tr.addfilegenerator(
1061 b'dirstate',
1061 b'dirstate',
1062 (self._filename,),
1062 (self._filename,),
1063 lambda f: self._writedirstate(tr, f),
1063 lambda f: self._writedirstate(tr, f),
1064 location=b'plain',
1064 location=b'plain',
1065 )
1065 )
1066 return
1066 return
1067
1067
1068 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
1068 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
1069 self._writedirstate(tr, st)
1069 self._writedirstate(tr, st)
1070
1070
1071 def addparentchangecallback(self, category, callback):
1071 def addparentchangecallback(self, category, callback):
1072 """add a callback to be called when the wd parents are changed
1072 """add a callback to be called when the wd parents are changed
1073
1073
1074 Callback will be called with the following arguments:
1074 Callback will be called with the following arguments:
1075 dirstate, (oldp1, oldp2), (newp1, newp2)
1075 dirstate, (oldp1, oldp2), (newp1, newp2)
1076
1076
1077 Category is a unique identifier to allow overwriting an old callback
1077 Category is a unique identifier to allow overwriting an old callback
1078 with a newer callback.
1078 with a newer callback.
1079 """
1079 """
1080 self._plchangecallbacks[category] = callback
1080 self._plchangecallbacks[category] = callback
1081
1081
1082 def _writedirstate(self, tr, st):
1082 def _writedirstate(self, tr, st):
1083 # notify callbacks about parents change
1083 # notify callbacks about parents change
1084 if self._origpl is not None and self._origpl != self._pl:
1084 if self._origpl is not None and self._origpl != self._pl:
1085 for c, callback in sorted(
1085 for c, callback in sorted(
1086 pycompat.iteritems(self._plchangecallbacks)
1086 pycompat.iteritems(self._plchangecallbacks)
1087 ):
1087 ):
1088 callback(self, self._origpl, self._pl)
1088 callback(self, self._origpl, self._pl)
1089 self._origpl = None
1089 self._origpl = None
1090 # use the modification time of the newly created temporary file as the
1090 # use the modification time of the newly created temporary file as the
1091 # filesystem's notion of 'now'
1091 # filesystem's notion of 'now'
1092 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
1092 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
1093
1093
1094 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1094 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1095 # timestamp of each entries in dirstate, because of 'now > mtime'
1095 # timestamp of each entries in dirstate, because of 'now > mtime'
1096 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
1096 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
1097 if delaywrite > 0:
1097 if delaywrite > 0:
1098 # do we have any files to delay for?
1098 # do we have any files to delay for?
1099 for f, e in pycompat.iteritems(self._map):
1099 for f, e in pycompat.iteritems(self._map):
1100 if e.need_delay(now):
1100 if e.need_delay(now):
1101 import time # to avoid useless import
1101 import time # to avoid useless import
1102
1102
1103 # rather than sleep n seconds, sleep until the next
1103 # rather than sleep n seconds, sleep until the next
1104 # multiple of n seconds
1104 # multiple of n seconds
1105 clock = time.time()
1105 clock = time.time()
1106 start = int(clock) - (int(clock) % delaywrite)
1106 start = int(clock) - (int(clock) % delaywrite)
1107 end = start + delaywrite
1107 end = start + delaywrite
1108 time.sleep(end - clock)
1108 time.sleep(end - clock)
1109 now = end # trust our estimate that the end is near now
1109 now = end # trust our estimate that the end is near now
1110 break
1110 break
1111
1111
1112 self._map.write(tr, st, now)
1112 self._map.write(tr, st, now)
1113 self._lastnormaltime = 0
1113 self._lastnormaltime = 0
1114 self._dirty = False
1114 self._dirty = False
1115
1115
1116 def _dirignore(self, f):
1116 def _dirignore(self, f):
1117 if self._ignore(f):
1117 if self._ignore(f):
1118 return True
1118 return True
1119 for p in pathutil.finddirs(f):
1119 for p in pathutil.finddirs(f):
1120 if self._ignore(p):
1120 if self._ignore(p):
1121 return True
1121 return True
1122 return False
1122 return False
1123
1123
1124 def _ignorefiles(self):
1124 def _ignorefiles(self):
1125 files = []
1125 files = []
1126 if os.path.exists(self._join(b'.hgignore')):
1126 if os.path.exists(self._join(b'.hgignore')):
1127 files.append(self._join(b'.hgignore'))
1127 files.append(self._join(b'.hgignore'))
1128 for name, path in self._ui.configitems(b"ui"):
1128 for name, path in self._ui.configitems(b"ui"):
1129 if name == b'ignore' or name.startswith(b'ignore.'):
1129 if name == b'ignore' or name.startswith(b'ignore.'):
1130 # we need to use os.path.join here rather than self._join
1130 # we need to use os.path.join here rather than self._join
1131 # because path is arbitrary and user-specified
1131 # because path is arbitrary and user-specified
1132 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1132 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1133 return files
1133 return files
1134
1134
1135 def _ignorefileandline(self, f):
1135 def _ignorefileandline(self, f):
1136 files = collections.deque(self._ignorefiles())
1136 files = collections.deque(self._ignorefiles())
1137 visited = set()
1137 visited = set()
1138 while files:
1138 while files:
1139 i = files.popleft()
1139 i = files.popleft()
1140 patterns = matchmod.readpatternfile(
1140 patterns = matchmod.readpatternfile(
1141 i, self._ui.warn, sourceinfo=True
1141 i, self._ui.warn, sourceinfo=True
1142 )
1142 )
1143 for pattern, lineno, line in patterns:
1143 for pattern, lineno, line in patterns:
1144 kind, p = matchmod._patsplit(pattern, b'glob')
1144 kind, p = matchmod._patsplit(pattern, b'glob')
1145 if kind == b"subinclude":
1145 if kind == b"subinclude":
1146 if p not in visited:
1146 if p not in visited:
1147 files.append(p)
1147 files.append(p)
1148 continue
1148 continue
1149 m = matchmod.match(
1149 m = matchmod.match(
1150 self._root, b'', [], [pattern], warn=self._ui.warn
1150 self._root, b'', [], [pattern], warn=self._ui.warn
1151 )
1151 )
1152 if m(f):
1152 if m(f):
1153 return (i, lineno, line)
1153 return (i, lineno, line)
1154 visited.add(i)
1154 visited.add(i)
1155 return (None, -1, b"")
1155 return (None, -1, b"")
1156
1156
1157 def _walkexplicit(self, match, subrepos):
1157 def _walkexplicit(self, match, subrepos):
1158 """Get stat data about the files explicitly specified by match.
1158 """Get stat data about the files explicitly specified by match.
1159
1159
1160 Return a triple (results, dirsfound, dirsnotfound).
1160 Return a triple (results, dirsfound, dirsnotfound).
1161 - results is a mapping from filename to stat result. It also contains
1161 - results is a mapping from filename to stat result. It also contains
1162 listings mapping subrepos and .hg to None.
1162 listings mapping subrepos and .hg to None.
1163 - dirsfound is a list of files found to be directories.
1163 - dirsfound is a list of files found to be directories.
1164 - dirsnotfound is a list of files that the dirstate thinks are
1164 - dirsnotfound is a list of files that the dirstate thinks are
1165 directories and that were not found."""
1165 directories and that were not found."""
1166
1166
1167 def badtype(mode):
1167 def badtype(mode):
1168 kind = _(b'unknown')
1168 kind = _(b'unknown')
1169 if stat.S_ISCHR(mode):
1169 if stat.S_ISCHR(mode):
1170 kind = _(b'character device')
1170 kind = _(b'character device')
1171 elif stat.S_ISBLK(mode):
1171 elif stat.S_ISBLK(mode):
1172 kind = _(b'block device')
1172 kind = _(b'block device')
1173 elif stat.S_ISFIFO(mode):
1173 elif stat.S_ISFIFO(mode):
1174 kind = _(b'fifo')
1174 kind = _(b'fifo')
1175 elif stat.S_ISSOCK(mode):
1175 elif stat.S_ISSOCK(mode):
1176 kind = _(b'socket')
1176 kind = _(b'socket')
1177 elif stat.S_ISDIR(mode):
1177 elif stat.S_ISDIR(mode):
1178 kind = _(b'directory')
1178 kind = _(b'directory')
1179 return _(b'unsupported file type (type is %s)') % kind
1179 return _(b'unsupported file type (type is %s)') % kind
1180
1180
1181 badfn = match.bad
1181 badfn = match.bad
1182 dmap = self._map
1182 dmap = self._map
1183 lstat = os.lstat
1183 lstat = os.lstat
1184 getkind = stat.S_IFMT
1184 getkind = stat.S_IFMT
1185 dirkind = stat.S_IFDIR
1185 dirkind = stat.S_IFDIR
1186 regkind = stat.S_IFREG
1186 regkind = stat.S_IFREG
1187 lnkkind = stat.S_IFLNK
1187 lnkkind = stat.S_IFLNK
1188 join = self._join
1188 join = self._join
1189 dirsfound = []
1189 dirsfound = []
1190 foundadd = dirsfound.append
1190 foundadd = dirsfound.append
1191 dirsnotfound = []
1191 dirsnotfound = []
1192 notfoundadd = dirsnotfound.append
1192 notfoundadd = dirsnotfound.append
1193
1193
1194 if not match.isexact() and self._checkcase:
1194 if not match.isexact() and self._checkcase:
1195 normalize = self._normalize
1195 normalize = self._normalize
1196 else:
1196 else:
1197 normalize = None
1197 normalize = None
1198
1198
1199 files = sorted(match.files())
1199 files = sorted(match.files())
1200 subrepos.sort()
1200 subrepos.sort()
1201 i, j = 0, 0
1201 i, j = 0, 0
1202 while i < len(files) and j < len(subrepos):
1202 while i < len(files) and j < len(subrepos):
1203 subpath = subrepos[j] + b"/"
1203 subpath = subrepos[j] + b"/"
1204 if files[i] < subpath:
1204 if files[i] < subpath:
1205 i += 1
1205 i += 1
1206 continue
1206 continue
1207 while i < len(files) and files[i].startswith(subpath):
1207 while i < len(files) and files[i].startswith(subpath):
1208 del files[i]
1208 del files[i]
1209 j += 1
1209 j += 1
1210
1210
1211 if not files or b'' in files:
1211 if not files or b'' in files:
1212 files = [b'']
1212 files = [b'']
1213 # constructing the foldmap is expensive, so don't do it for the
1213 # constructing the foldmap is expensive, so don't do it for the
1214 # common case where files is ['']
1214 # common case where files is ['']
1215 normalize = None
1215 normalize = None
1216 results = dict.fromkeys(subrepos)
1216 results = dict.fromkeys(subrepos)
1217 results[b'.hg'] = None
1217 results[b'.hg'] = None
1218
1218
1219 for ff in files:
1219 for ff in files:
1220 if normalize:
1220 if normalize:
1221 nf = normalize(ff, False, True)
1221 nf = normalize(ff, False, True)
1222 else:
1222 else:
1223 nf = ff
1223 nf = ff
1224 if nf in results:
1224 if nf in results:
1225 continue
1225 continue
1226
1226
1227 try:
1227 try:
1228 st = lstat(join(nf))
1228 st = lstat(join(nf))
1229 kind = getkind(st.st_mode)
1229 kind = getkind(st.st_mode)
1230 if kind == dirkind:
1230 if kind == dirkind:
1231 if nf in dmap:
1231 if nf in dmap:
1232 # file replaced by dir on disk but still in dirstate
1232 # file replaced by dir on disk but still in dirstate
1233 results[nf] = None
1233 results[nf] = None
1234 foundadd((nf, ff))
1234 foundadd((nf, ff))
1235 elif kind == regkind or kind == lnkkind:
1235 elif kind == regkind or kind == lnkkind:
1236 results[nf] = st
1236 results[nf] = st
1237 else:
1237 else:
1238 badfn(ff, badtype(kind))
1238 badfn(ff, badtype(kind))
1239 if nf in dmap:
1239 if nf in dmap:
1240 results[nf] = None
1240 results[nf] = None
1241 except OSError as inst: # nf not found on disk - it is dirstate only
1241 except OSError as inst: # nf not found on disk - it is dirstate only
1242 if nf in dmap: # does it exactly match a missing file?
1242 if nf in dmap: # does it exactly match a missing file?
1243 results[nf] = None
1243 results[nf] = None
1244 else: # does it match a missing directory?
1244 else: # does it match a missing directory?
1245 if self._map.hasdir(nf):
1245 if self._map.hasdir(nf):
1246 notfoundadd(nf)
1246 notfoundadd(nf)
1247 else:
1247 else:
1248 badfn(ff, encoding.strtolocal(inst.strerror))
1248 badfn(ff, encoding.strtolocal(inst.strerror))
1249
1249
1250 # match.files() may contain explicitly-specified paths that shouldn't
1250 # match.files() may contain explicitly-specified paths that shouldn't
1251 # be taken; drop them from the list of files found. dirsfound/notfound
1251 # be taken; drop them from the list of files found. dirsfound/notfound
1252 # aren't filtered here because they will be tested later.
1252 # aren't filtered here because they will be tested later.
1253 if match.anypats():
1253 if match.anypats():
1254 for f in list(results):
1254 for f in list(results):
1255 if f == b'.hg' or f in subrepos:
1255 if f == b'.hg' or f in subrepos:
1256 # keep sentinel to disable further out-of-repo walks
1256 # keep sentinel to disable further out-of-repo walks
1257 continue
1257 continue
1258 if not match(f):
1258 if not match(f):
1259 del results[f]
1259 del results[f]
1260
1260
1261 # Case insensitive filesystems cannot rely on lstat() failing to detect
1261 # Case insensitive filesystems cannot rely on lstat() failing to detect
1262 # a case-only rename. Prune the stat object for any file that does not
1262 # a case-only rename. Prune the stat object for any file that does not
1263 # match the case in the filesystem, if there are multiple files that
1263 # match the case in the filesystem, if there are multiple files that
1264 # normalize to the same path.
1264 # normalize to the same path.
1265 if match.isexact() and self._checkcase:
1265 if match.isexact() and self._checkcase:
1266 normed = {}
1266 normed = {}
1267
1267
1268 for f, st in pycompat.iteritems(results):
1268 for f, st in pycompat.iteritems(results):
1269 if st is None:
1269 if st is None:
1270 continue
1270 continue
1271
1271
1272 nc = util.normcase(f)
1272 nc = util.normcase(f)
1273 paths = normed.get(nc)
1273 paths = normed.get(nc)
1274
1274
1275 if paths is None:
1275 if paths is None:
1276 paths = set()
1276 paths = set()
1277 normed[nc] = paths
1277 normed[nc] = paths
1278
1278
1279 paths.add(f)
1279 paths.add(f)
1280
1280
1281 for norm, paths in pycompat.iteritems(normed):
1281 for norm, paths in pycompat.iteritems(normed):
1282 if len(paths) > 1:
1282 if len(paths) > 1:
1283 for path in paths:
1283 for path in paths:
1284 folded = self._discoverpath(
1284 folded = self._discoverpath(
1285 path, norm, True, None, self._map.dirfoldmap
1285 path, norm, True, None, self._map.dirfoldmap
1286 )
1286 )
1287 if path != folded:
1287 if path != folded:
1288 results[path] = None
1288 results[path] = None
1289
1289
1290 return results, dirsfound, dirsnotfound
1290 return results, dirsfound, dirsnotfound
1291
1291
1292 def walk(self, match, subrepos, unknown, ignored, full=True):
1292 def walk(self, match, subrepos, unknown, ignored, full=True):
1293 """
1293 """
1294 Walk recursively through the directory tree, finding all files
1294 Walk recursively through the directory tree, finding all files
1295 matched by match.
1295 matched by match.
1296
1296
1297 If full is False, maybe skip some known-clean files.
1297 If full is False, maybe skip some known-clean files.
1298
1298
1299 Return a dict mapping filename to stat-like object (either
1299 Return a dict mapping filename to stat-like object (either
1300 mercurial.osutil.stat instance or return value of os.stat()).
1300 mercurial.osutil.stat instance or return value of os.stat()).
1301
1301
1302 """
1302 """
1303 # full is a flag that extensions that hook into walk can use -- this
1303 # full is a flag that extensions that hook into walk can use -- this
1304 # implementation doesn't use it at all. This satisfies the contract
1304 # implementation doesn't use it at all. This satisfies the contract
1305 # because we only guarantee a "maybe".
1305 # because we only guarantee a "maybe".
1306
1306
1307 if ignored:
1307 if ignored:
1308 ignore = util.never
1308 ignore = util.never
1309 dirignore = util.never
1309 dirignore = util.never
1310 elif unknown:
1310 elif unknown:
1311 ignore = self._ignore
1311 ignore = self._ignore
1312 dirignore = self._dirignore
1312 dirignore = self._dirignore
1313 else:
1313 else:
1314 # if not unknown and not ignored, drop dir recursion and step 2
1314 # if not unknown and not ignored, drop dir recursion and step 2
1315 ignore = util.always
1315 ignore = util.always
1316 dirignore = util.always
1316 dirignore = util.always
1317
1317
1318 matchfn = match.matchfn
1318 matchfn = match.matchfn
1319 matchalways = match.always()
1319 matchalways = match.always()
1320 matchtdir = match.traversedir
1320 matchtdir = match.traversedir
1321 dmap = self._map
1321 dmap = self._map
1322 listdir = util.listdir
1322 listdir = util.listdir
1323 lstat = os.lstat
1323 lstat = os.lstat
1324 dirkind = stat.S_IFDIR
1324 dirkind = stat.S_IFDIR
1325 regkind = stat.S_IFREG
1325 regkind = stat.S_IFREG
1326 lnkkind = stat.S_IFLNK
1326 lnkkind = stat.S_IFLNK
1327 join = self._join
1327 join = self._join
1328
1328
1329 exact = skipstep3 = False
1329 exact = skipstep3 = False
1330 if match.isexact(): # match.exact
1330 if match.isexact(): # match.exact
1331 exact = True
1331 exact = True
1332 dirignore = util.always # skip step 2
1332 dirignore = util.always # skip step 2
1333 elif match.prefix(): # match.match, no patterns
1333 elif match.prefix(): # match.match, no patterns
1334 skipstep3 = True
1334 skipstep3 = True
1335
1335
1336 if not exact and self._checkcase:
1336 if not exact and self._checkcase:
1337 normalize = self._normalize
1337 normalize = self._normalize
1338 normalizefile = self._normalizefile
1338 normalizefile = self._normalizefile
1339 skipstep3 = False
1339 skipstep3 = False
1340 else:
1340 else:
1341 normalize = self._normalize
1341 normalize = self._normalize
1342 normalizefile = None
1342 normalizefile = None
1343
1343
1344 # step 1: find all explicit files
1344 # step 1: find all explicit files
1345 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1345 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1346 if matchtdir:
1346 if matchtdir:
1347 for d in work:
1347 for d in work:
1348 matchtdir(d[0])
1348 matchtdir(d[0])
1349 for d in dirsnotfound:
1349 for d in dirsnotfound:
1350 matchtdir(d)
1350 matchtdir(d)
1351
1351
1352 skipstep3 = skipstep3 and not (work or dirsnotfound)
1352 skipstep3 = skipstep3 and not (work or dirsnotfound)
1353 work = [d for d in work if not dirignore(d[0])]
1353 work = [d for d in work if not dirignore(d[0])]
1354
1354
1355 # step 2: visit subdirectories
1355 # step 2: visit subdirectories
1356 def traverse(work, alreadynormed):
1356 def traverse(work, alreadynormed):
1357 wadd = work.append
1357 wadd = work.append
1358 while work:
1358 while work:
1359 tracing.counter('dirstate.walk work', len(work))
1359 tracing.counter('dirstate.walk work', len(work))
1360 nd = work.pop()
1360 nd = work.pop()
1361 visitentries = match.visitchildrenset(nd)
1361 visitentries = match.visitchildrenset(nd)
1362 if not visitentries:
1362 if not visitentries:
1363 continue
1363 continue
1364 if visitentries == b'this' or visitentries == b'all':
1364 if visitentries == b'this' or visitentries == b'all':
1365 visitentries = None
1365 visitentries = None
1366 skip = None
1366 skip = None
1367 if nd != b'':
1367 if nd != b'':
1368 skip = b'.hg'
1368 skip = b'.hg'
1369 try:
1369 try:
1370 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1370 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1371 entries = listdir(join(nd), stat=True, skip=skip)
1371 entries = listdir(join(nd), stat=True, skip=skip)
1372 except OSError as inst:
1372 except OSError as inst:
1373 if inst.errno in (errno.EACCES, errno.ENOENT):
1373 if inst.errno in (errno.EACCES, errno.ENOENT):
1374 match.bad(
1374 match.bad(
1375 self.pathto(nd), encoding.strtolocal(inst.strerror)
1375 self.pathto(nd), encoding.strtolocal(inst.strerror)
1376 )
1376 )
1377 continue
1377 continue
1378 raise
1378 raise
1379 for f, kind, st in entries:
1379 for f, kind, st in entries:
1380 # Some matchers may return files in the visitentries set,
1380 # Some matchers may return files in the visitentries set,
1381 # instead of 'this', if the matcher explicitly mentions them
1381 # instead of 'this', if the matcher explicitly mentions them
1382 # and is not an exactmatcher. This is acceptable; we do not
1382 # and is not an exactmatcher. This is acceptable; we do not
1383 # make any hard assumptions about file-or-directory below
1383 # make any hard assumptions about file-or-directory below
1384 # based on the presence of `f` in visitentries. If
1384 # based on the presence of `f` in visitentries. If
1385 # visitchildrenset returned a set, we can always skip the
1385 # visitchildrenset returned a set, we can always skip the
1386 # entries *not* in the set it provided regardless of whether
1386 # entries *not* in the set it provided regardless of whether
1387 # they're actually a file or a directory.
1387 # they're actually a file or a directory.
1388 if visitentries and f not in visitentries:
1388 if visitentries and f not in visitentries:
1389 continue
1389 continue
1390 if normalizefile:
1390 if normalizefile:
1391 # even though f might be a directory, we're only
1391 # even though f might be a directory, we're only
1392 # interested in comparing it to files currently in the
1392 # interested in comparing it to files currently in the
1393 # dmap -- therefore normalizefile is enough
1393 # dmap -- therefore normalizefile is enough
1394 nf = normalizefile(
1394 nf = normalizefile(
1395 nd and (nd + b"/" + f) or f, True, True
1395 nd and (nd + b"/" + f) or f, True, True
1396 )
1396 )
1397 else:
1397 else:
1398 nf = nd and (nd + b"/" + f) or f
1398 nf = nd and (nd + b"/" + f) or f
1399 if nf not in results:
1399 if nf not in results:
1400 if kind == dirkind:
1400 if kind == dirkind:
1401 if not ignore(nf):
1401 if not ignore(nf):
1402 if matchtdir:
1402 if matchtdir:
1403 matchtdir(nf)
1403 matchtdir(nf)
1404 wadd(nf)
1404 wadd(nf)
1405 if nf in dmap and (matchalways or matchfn(nf)):
1405 if nf in dmap and (matchalways or matchfn(nf)):
1406 results[nf] = None
1406 results[nf] = None
1407 elif kind == regkind or kind == lnkkind:
1407 elif kind == regkind or kind == lnkkind:
1408 if nf in dmap:
1408 if nf in dmap:
1409 if matchalways or matchfn(nf):
1409 if matchalways or matchfn(nf):
1410 results[nf] = st
1410 results[nf] = st
1411 elif (matchalways or matchfn(nf)) and not ignore(
1411 elif (matchalways or matchfn(nf)) and not ignore(
1412 nf
1412 nf
1413 ):
1413 ):
1414 # unknown file -- normalize if necessary
1414 # unknown file -- normalize if necessary
1415 if not alreadynormed:
1415 if not alreadynormed:
1416 nf = normalize(nf, False, True)
1416 nf = normalize(nf, False, True)
1417 results[nf] = st
1417 results[nf] = st
1418 elif nf in dmap and (matchalways or matchfn(nf)):
1418 elif nf in dmap and (matchalways or matchfn(nf)):
1419 results[nf] = None
1419 results[nf] = None
1420
1420
1421 for nd, d in work:
1421 for nd, d in work:
1422 # alreadynormed means that processwork doesn't have to do any
1422 # alreadynormed means that processwork doesn't have to do any
1423 # expensive directory normalization
1423 # expensive directory normalization
1424 alreadynormed = not normalize or nd == d
1424 alreadynormed = not normalize or nd == d
1425 traverse([d], alreadynormed)
1425 traverse([d], alreadynormed)
1426
1426
1427 for s in subrepos:
1427 for s in subrepos:
1428 del results[s]
1428 del results[s]
1429 del results[b'.hg']
1429 del results[b'.hg']
1430
1430
1431 # step 3: visit remaining files from dmap
1431 # step 3: visit remaining files from dmap
1432 if not skipstep3 and not exact:
1432 if not skipstep3 and not exact:
1433 # If a dmap file is not in results yet, it was either
1433 # If a dmap file is not in results yet, it was either
1434 # a) not matching matchfn b) ignored, c) missing, or d) under a
1434 # a) not matching matchfn b) ignored, c) missing, or d) under a
1435 # symlink directory.
1435 # symlink directory.
1436 if not results and matchalways:
1436 if not results and matchalways:
1437 visit = [f for f in dmap]
1437 visit = [f for f in dmap]
1438 else:
1438 else:
1439 visit = [f for f in dmap if f not in results and matchfn(f)]
1439 visit = [f for f in dmap if f not in results and matchfn(f)]
1440 visit.sort()
1440 visit.sort()
1441
1441
1442 if unknown:
1442 if unknown:
1443 # unknown == True means we walked all dirs under the roots
1443 # unknown == True means we walked all dirs under the roots
1444 # that wasn't ignored, and everything that matched was stat'ed
1444 # that wasn't ignored, and everything that matched was stat'ed
1445 # and is already in results.
1445 # and is already in results.
1446 # The rest must thus be ignored or under a symlink.
1446 # The rest must thus be ignored or under a symlink.
1447 audit_path = pathutil.pathauditor(self._root, cached=True)
1447 audit_path = pathutil.pathauditor(self._root, cached=True)
1448
1448
1449 for nf in iter(visit):
1449 for nf in iter(visit):
1450 # If a stat for the same file was already added with a
1450 # If a stat for the same file was already added with a
1451 # different case, don't add one for this, since that would
1451 # different case, don't add one for this, since that would
1452 # make it appear as if the file exists under both names
1452 # make it appear as if the file exists under both names
1453 # on disk.
1453 # on disk.
1454 if (
1454 if (
1455 normalizefile
1455 normalizefile
1456 and normalizefile(nf, True, True) in results
1456 and normalizefile(nf, True, True) in results
1457 ):
1457 ):
1458 results[nf] = None
1458 results[nf] = None
1459 # Report ignored items in the dmap as long as they are not
1459 # Report ignored items in the dmap as long as they are not
1460 # under a symlink directory.
1460 # under a symlink directory.
1461 elif audit_path.check(nf):
1461 elif audit_path.check(nf):
1462 try:
1462 try:
1463 results[nf] = lstat(join(nf))
1463 results[nf] = lstat(join(nf))
1464 # file was just ignored, no links, and exists
1464 # file was just ignored, no links, and exists
1465 except OSError:
1465 except OSError:
1466 # file doesn't exist
1466 # file doesn't exist
1467 results[nf] = None
1467 results[nf] = None
1468 else:
1468 else:
1469 # It's either missing or under a symlink directory
1469 # It's either missing or under a symlink directory
1470 # which we in this case report as missing
1470 # which we in this case report as missing
1471 results[nf] = None
1471 results[nf] = None
1472 else:
1472 else:
1473 # We may not have walked the full directory tree above,
1473 # We may not have walked the full directory tree above,
1474 # so stat and check everything we missed.
1474 # so stat and check everything we missed.
1475 iv = iter(visit)
1475 iv = iter(visit)
1476 for st in util.statfiles([join(i) for i in visit]):
1476 for st in util.statfiles([join(i) for i in visit]):
1477 results[next(iv)] = st
1477 results[next(iv)] = st
1478 return results
1478 return results
1479
1479
1480 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1480 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1481 # Force Rayon (Rust parallelism library) to respect the number of
1481 # Force Rayon (Rust parallelism library) to respect the number of
1482 # workers. This is a temporary workaround until Rust code knows
1482 # workers. This is a temporary workaround until Rust code knows
1483 # how to read the config file.
1483 # how to read the config file.
1484 numcpus = self._ui.configint(b"worker", b"numcpus")
1484 numcpus = self._ui.configint(b"worker", b"numcpus")
1485 if numcpus is not None:
1485 if numcpus is not None:
1486 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1486 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1487
1487
1488 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1488 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1489 if not workers_enabled:
1489 if not workers_enabled:
1490 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1490 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1491
1491
1492 (
1492 (
1493 lookup,
1493 lookup,
1494 modified,
1494 modified,
1495 added,
1495 added,
1496 removed,
1496 removed,
1497 deleted,
1497 deleted,
1498 clean,
1498 clean,
1499 ignored,
1499 ignored,
1500 unknown,
1500 unknown,
1501 warnings,
1501 warnings,
1502 bad,
1502 bad,
1503 traversed,
1503 traversed,
1504 dirty,
1504 dirty,
1505 ) = rustmod.status(
1505 ) = rustmod.status(
1506 self._map._rustmap,
1506 self._map._rustmap,
1507 matcher,
1507 matcher,
1508 self._rootdir,
1508 self._rootdir,
1509 self._ignorefiles(),
1509 self._ignorefiles(),
1510 self._checkexec,
1510 self._checkexec,
1511 self._lastnormaltime,
1511 self._lastnormaltime,
1512 bool(list_clean),
1512 bool(list_clean),
1513 bool(list_ignored),
1513 bool(list_ignored),
1514 bool(list_unknown),
1514 bool(list_unknown),
1515 bool(matcher.traversedir),
1515 bool(matcher.traversedir),
1516 )
1516 )
1517
1517
1518 self._dirty |= dirty
1518 self._dirty |= dirty
1519
1519
1520 if matcher.traversedir:
1520 if matcher.traversedir:
1521 for dir in traversed:
1521 for dir in traversed:
1522 matcher.traversedir(dir)
1522 matcher.traversedir(dir)
1523
1523
1524 if self._ui.warn:
1524 if self._ui.warn:
1525 for item in warnings:
1525 for item in warnings:
1526 if isinstance(item, tuple):
1526 if isinstance(item, tuple):
1527 file_path, syntax = item
1527 file_path, syntax = item
1528 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1528 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1529 file_path,
1529 file_path,
1530 syntax,
1530 syntax,
1531 )
1531 )
1532 self._ui.warn(msg)
1532 self._ui.warn(msg)
1533 else:
1533 else:
1534 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1534 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1535 self._ui.warn(
1535 self._ui.warn(
1536 msg
1536 msg
1537 % (
1537 % (
1538 pathutil.canonpath(
1538 pathutil.canonpath(
1539 self._rootdir, self._rootdir, item
1539 self._rootdir, self._rootdir, item
1540 ),
1540 ),
1541 b"No such file or directory",
1541 b"No such file or directory",
1542 )
1542 )
1543 )
1543 )
1544
1544
1545 for (fn, message) in bad:
1545 for (fn, message) in bad:
1546 matcher.bad(fn, encoding.strtolocal(message))
1546 matcher.bad(fn, encoding.strtolocal(message))
1547
1547
1548 status = scmutil.status(
1548 status = scmutil.status(
1549 modified=modified,
1549 modified=modified,
1550 added=added,
1550 added=added,
1551 removed=removed,
1551 removed=removed,
1552 deleted=deleted,
1552 deleted=deleted,
1553 unknown=unknown,
1553 unknown=unknown,
1554 ignored=ignored,
1554 ignored=ignored,
1555 clean=clean,
1555 clean=clean,
1556 )
1556 )
1557 return (lookup, status)
1557 return (lookup, status)
1558
1558
1559 def status(self, match, subrepos, ignored, clean, unknown):
1559 def status(self, match, subrepos, ignored, clean, unknown):
1560 """Determine the status of the working copy relative to the
1560 """Determine the status of the working copy relative to the
1561 dirstate and return a pair of (unsure, status), where status is of type
1561 dirstate and return a pair of (unsure, status), where status is of type
1562 scmutil.status and:
1562 scmutil.status and:
1563
1563
1564 unsure:
1564 unsure:
1565 files that might have been modified since the dirstate was
1565 files that might have been modified since the dirstate was
1566 written, but need to be read to be sure (size is the same
1566 written, but need to be read to be sure (size is the same
1567 but mtime differs)
1567 but mtime differs)
1568 status.modified:
1568 status.modified:
1569 files that have definitely been modified since the dirstate
1569 files that have definitely been modified since the dirstate
1570 was written (different size or mode)
1570 was written (different size or mode)
1571 status.clean:
1571 status.clean:
1572 files that have definitely not been modified since the
1572 files that have definitely not been modified since the
1573 dirstate was written
1573 dirstate was written
1574 """
1574 """
1575 listignored, listclean, listunknown = ignored, clean, unknown
1575 listignored, listclean, listunknown = ignored, clean, unknown
1576 lookup, modified, added, unknown, ignored = [], [], [], [], []
1576 lookup, modified, added, unknown, ignored = [], [], [], [], []
1577 removed, deleted, clean = [], [], []
1577 removed, deleted, clean = [], [], []
1578
1578
1579 dmap = self._map
1579 dmap = self._map
1580 dmap.preload()
1580 dmap.preload()
1581
1581
1582 use_rust = True
1582 use_rust = True
1583
1583
1584 allowed_matchers = (
1584 allowed_matchers = (
1585 matchmod.alwaysmatcher,
1585 matchmod.alwaysmatcher,
1586 matchmod.exactmatcher,
1586 matchmod.exactmatcher,
1587 matchmod.includematcher,
1587 matchmod.includematcher,
1588 )
1588 )
1589
1589
1590 if rustmod is None:
1590 if rustmod is None:
1591 use_rust = False
1591 use_rust = False
1592 elif self._checkcase:
1592 elif self._checkcase:
1593 # Case-insensitive filesystems are not handled yet
1593 # Case-insensitive filesystems are not handled yet
1594 use_rust = False
1594 use_rust = False
1595 elif subrepos:
1595 elif subrepos:
1596 use_rust = False
1596 use_rust = False
1597 elif sparse.enabled:
1597 elif sparse.enabled:
1598 use_rust = False
1598 use_rust = False
1599 elif not isinstance(match, allowed_matchers):
1599 elif not isinstance(match, allowed_matchers):
1600 # Some matchers have yet to be implemented
1600 # Some matchers have yet to be implemented
1601 use_rust = False
1601 use_rust = False
1602
1602
1603 if use_rust:
1603 if use_rust:
1604 try:
1604 try:
1605 return self._rust_status(
1605 return self._rust_status(
1606 match, listclean, listignored, listunknown
1606 match, listclean, listignored, listunknown
1607 )
1607 )
1608 except rustmod.FallbackError:
1608 except rustmod.FallbackError:
1609 pass
1609 pass
1610
1610
1611 def noop(f):
1611 def noop(f):
1612 pass
1612 pass
1613
1613
1614 dcontains = dmap.__contains__
1614 dcontains = dmap.__contains__
1615 dget = dmap.__getitem__
1615 dget = dmap.__getitem__
1616 ladd = lookup.append # aka "unsure"
1616 ladd = lookup.append # aka "unsure"
1617 madd = modified.append
1617 madd = modified.append
1618 aadd = added.append
1618 aadd = added.append
1619 uadd = unknown.append if listunknown else noop
1619 uadd = unknown.append if listunknown else noop
1620 iadd = ignored.append if listignored else noop
1620 iadd = ignored.append if listignored else noop
1621 radd = removed.append
1621 radd = removed.append
1622 dadd = deleted.append
1622 dadd = deleted.append
1623 cadd = clean.append if listclean else noop
1623 cadd = clean.append if listclean else noop
1624 mexact = match.exact
1624 mexact = match.exact
1625 dirignore = self._dirignore
1625 dirignore = self._dirignore
1626 checkexec = self._checkexec
1626 checkexec = self._checkexec
1627 copymap = self._map.copymap
1627 copymap = self._map.copymap
1628 lastnormaltime = self._lastnormaltime
1628 lastnormaltime = self._lastnormaltime
1629
1629
1630 # We need to do full walks when either
1630 # We need to do full walks when either
1631 # - we're listing all clean files, or
1631 # - we're listing all clean files, or
1632 # - match.traversedir does something, because match.traversedir should
1632 # - match.traversedir does something, because match.traversedir should
1633 # be called for every dir in the working dir
1633 # be called for every dir in the working dir
1634 full = listclean or match.traversedir is not None
1634 full = listclean or match.traversedir is not None
1635 for fn, st in pycompat.iteritems(
1635 for fn, st in pycompat.iteritems(
1636 self.walk(match, subrepos, listunknown, listignored, full=full)
1636 self.walk(match, subrepos, listunknown, listignored, full=full)
1637 ):
1637 ):
1638 if not dcontains(fn):
1638 if not dcontains(fn):
1639 if (listignored or mexact(fn)) and dirignore(fn):
1639 if (listignored or mexact(fn)) and dirignore(fn):
1640 if listignored:
1640 if listignored:
1641 iadd(fn)
1641 iadd(fn)
1642 else:
1642 else:
1643 uadd(fn)
1643 uadd(fn)
1644 continue
1644 continue
1645
1645
1646 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1646 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1647 # written like that for performance reasons. dmap[fn] is not a
1647 # written like that for performance reasons. dmap[fn] is not a
1648 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1648 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1649 # opcode has fast paths when the value to be unpacked is a tuple or
1649 # opcode has fast paths when the value to be unpacked is a tuple or
1650 # a list, but falls back to creating a full-fledged iterator in
1650 # a list, but falls back to creating a full-fledged iterator in
1651 # general. That is much slower than simply accessing and storing the
1651 # general. That is much slower than simply accessing and storing the
1652 # tuple members one by one.
1652 # tuple members one by one.
1653 t = dget(fn)
1653 t = dget(fn)
1654 mode = t.mode
1654 mode = t.mode
1655 size = t.size
1655 size = t.size
1656 time = t.mtime
1656 time = t.mtime
1657
1657
1658 if not st and t.tracked:
1658 if not st and t.tracked:
1659 dadd(fn)
1659 dadd(fn)
1660 elif t.merged:
1660 elif t.merged:
1661 madd(fn)
1661 madd(fn)
1662 elif t.added:
1662 elif t.added:
1663 aadd(fn)
1663 aadd(fn)
1664 elif t.removed:
1664 elif t.removed:
1665 radd(fn)
1665 radd(fn)
1666 elif t.tracked:
1666 elif t.tracked:
1667 if (
1667 if (
1668 size >= 0
1668 size >= 0
1669 and (
1669 and (
1670 (size != st.st_size and size != st.st_size & _rangemask)
1670 (size != st.st_size and size != st.st_size & _rangemask)
1671 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1671 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1672 )
1672 )
1673 or t.from_p2
1673 or t.from_p2
1674 or fn in copymap
1674 or fn in copymap
1675 ):
1675 ):
1676 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1676 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1677 # issue6456: Size returned may be longer due to
1677 # issue6456: Size returned may be longer due to
1678 # encryption on EXT-4 fscrypt, undecided.
1678 # encryption on EXT-4 fscrypt, undecided.
1679 ladd(fn)
1679 ladd(fn)
1680 else:
1680 else:
1681 madd(fn)
1681 madd(fn)
1682 elif (
1682 elif (
1683 time != st[stat.ST_MTIME]
1683 time != st[stat.ST_MTIME]
1684 and time != st[stat.ST_MTIME] & _rangemask
1684 and time != st[stat.ST_MTIME] & _rangemask
1685 ):
1685 ):
1686 ladd(fn)
1686 ladd(fn)
1687 elif st[stat.ST_MTIME] == lastnormaltime:
1687 elif st[stat.ST_MTIME] == lastnormaltime:
1688 # fn may have just been marked as normal and it may have
1688 # fn may have just been marked as normal and it may have
1689 # changed in the same second without changing its size.
1689 # changed in the same second without changing its size.
1690 # This can happen if we quickly do multiple commits.
1690 # This can happen if we quickly do multiple commits.
1691 # Force lookup, so we don't miss such a racy file change.
1691 # Force lookup, so we don't miss such a racy file change.
1692 ladd(fn)
1692 ladd(fn)
1693 elif listclean:
1693 elif listclean:
1694 cadd(fn)
1694 cadd(fn)
1695 status = scmutil.status(
1695 status = scmutil.status(
1696 modified, added, removed, deleted, unknown, ignored, clean
1696 modified, added, removed, deleted, unknown, ignored, clean
1697 )
1697 )
1698 return (lookup, status)
1698 return (lookup, status)
1699
1699
1700 def matches(self, match):
1700 def matches(self, match):
1701 """
1701 """
1702 return files in the dirstate (in whatever state) filtered by match
1702 return files in the dirstate (in whatever state) filtered by match
1703 """
1703 """
1704 dmap = self._map
1704 dmap = self._map
1705 if rustmod is not None:
1705 if rustmod is not None:
1706 dmap = self._map._rustmap
1706 dmap = self._map._rustmap
1707
1707
1708 if match.always():
1708 if match.always():
1709 return dmap.keys()
1709 return dmap.keys()
1710 files = match.files()
1710 files = match.files()
1711 if match.isexact():
1711 if match.isexact():
1712 # fast path -- filter the other way around, since typically files is
1712 # fast path -- filter the other way around, since typically files is
1713 # much smaller than dmap
1713 # much smaller than dmap
1714 return [f for f in files if f in dmap]
1714 return [f for f in files if f in dmap]
1715 if match.prefix() and all(fn in dmap for fn in files):
1715 if match.prefix() and all(fn in dmap for fn in files):
1716 # fast path -- all the values are known to be files, so just return
1716 # fast path -- all the values are known to be files, so just return
1717 # that
1717 # that
1718 return list(files)
1718 return list(files)
1719 return [f for f in dmap if match(f)]
1719 return [f for f in dmap if match(f)]
1720
1720
1721 def _actualfilename(self, tr):
1721 def _actualfilename(self, tr):
1722 if tr:
1722 if tr:
1723 return self._pendingfilename
1723 return self._pendingfilename
1724 else:
1724 else:
1725 return self._filename
1725 return self._filename
1726
1726
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        Flushes any pending in-memory state to disk first (into the
        pending file if transaction `tr` is running), then hardlinks
        that file to `backupname`.
        '''
        filename = self._actualfilename(tr)
        # backing a file up onto itself would destroy it
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # remove any stale backup before (re)creating it
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1765
1765
1766 def restorebackup(self, tr, backupname):
1766 def restorebackup(self, tr, backupname):
1767 '''Restore dirstate by backup file'''
1767 '''Restore dirstate by backup file'''
1768 # this "invalidate()" prevents "wlock.release()" from writing
1768 # this "invalidate()" prevents "wlock.release()" from writing
1769 # changes of dirstate out after restoring from backup file
1769 # changes of dirstate out after restoring from backup file
1770 self.invalidate()
1770 self.invalidate()
1771 filename = self._actualfilename(tr)
1771 filename = self._actualfilename(tr)
1772 o = self._opener
1772 o = self._opener
1773 if util.samefile(o.join(backupname), o.join(filename)):
1773 if util.samefile(o.join(backupname), o.join(filename)):
1774 o.unlink(backupname)
1774 o.unlink(backupname)
1775 else:
1775 else:
1776 o.rename(backupname, filename, checkambig=True)
1776 o.rename(backupname, filename, checkambig=True)
1777
1777
    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        # `tr` is unused here; it is accepted for interface symmetry
        # with savebackup/restorebackup.
        self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now