##// END OF EJS Templates
dirstate: deprecate the `merge` method in all cases...
marmoute -
r48544:372ff463 default
parent child Browse files
Show More
@@ -1,1749 +1,1766 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
# C (or pure-Python fallback) parser module, resolved through the policy
# system so the best available implementation is used.
parsers = policy.importmod('parsers')
# Optional Rust implementation of dirstate primitives; None when unavailable.
rustmod = policy.importrust('dirstate')

# Dirstate format v2 is only implemented by the Rust code path.
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# Short local aliases for frequently used helpers.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

# Entry class describing the recorded state of a single file.
DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # obj is the dirstate instance; its opener is rooted at .hg/
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator enforcing that func runs inside a parentchange context."""

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator enforcing that func runs outside a parentchange context."""

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # set when in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # number of nested parentchange() contexts currently open
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        # parents as they were before the current round of changes, if any
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139
139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # Touching the property forces the dirstate map (and thus the
        # parents) to be read now rather than lazily later.
        self._pl
146
146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # Assigning to self._map replaces this propertycache slot with the
        # constructed map, so later accesses hit the attribute directly.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181
181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
    @property
    def _pl(self):
        # (p1, p2) parents tuple as stored in the dirstate map
        return self._map.parents()

    def hasdir(self, d):
        # True if d is a directory that contains tracked files
        return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' even though the OS
        # separator differs (governed by the ui.slash config knob)
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # whether the filesystem under the repo root supports symlinks
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # whether the filesystem preserves the executable bit
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed against '.hg')
        return not util.fscasesensitive(self._join(b'.hg'))
235
235
    def _join(self, f):
        """Return the absolute path of tracked file f."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240
240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a tracked path to its flags.

        The returned function yields b'l' for symlinks, b'x' for executable
        files and b'' otherwise.  When the filesystem cannot express symlinks
        and/or the exec bit, the missing information comes from the fallback
        function produced by buildfallback().
        """
        if self._checklink and self._checkexec:
            # best case: the filesystem can answer both questions itself

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    # file vanished or is unreadable: report no flags
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks are real, exec bit must come from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit is real, symlink info must come from the fallback

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither is supported: rely entirely on the fallback
            return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
    def __contains__(self, key):
        # membership == the file has an entry in the dirstate map
        return key in self._map

    def __iter__(self):
        # deterministic (sorted) iteration over tracked filenames
        return iter(sorted(self._map))

    def items(self):
        # iterate (filename, DirstateItem) pairs
        return pycompat.iteritems(self._map)

    # Python 2 spelling kept for compatibility.
    iteritems = items

    def directories(self):
        return self._map.directories()

    def parents(self):
        # validated (p1, p2) nodes of the working directory
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        # branch name converted to the local encoding for display
        return encoding.tolocal(self._branch)
366
366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents so a later write can tell
            # whether the parents actually moved
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # leaving a merge: only non-normal / other-parent entries can
            # carry merge state that needs fixing up
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414
414
    def setbranch(self, branch):
        """Persist the working-directory branch name to .hg/branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # discard the partially-written temp file before re-raising
            f.discard()
            raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
    def copied(self, file):
        # source of `file` if it is recorded as a copy, else None
        return self._map.copymap.get(file, None)

    def copies(self):
        # mapping of {destination: source} for all recorded copies
        return self._map.copymap
464
464
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True the file was previously untracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            # unknown to the dirstate: schedule it for addition
            self._add(filename)
            return True
        elif not entry.tracked:
            # known (e.g. removed) but untracked: resurrect it
            self._normallookup(filename)
            return True
        # XXX This is probably overkill for more case, but we need this to
        # fully replace the `normallookup` call with `set_tracked` one.
        # Consider smoothing this in the future.
        self.set_possibly_dirty(filename)
        return False
486
486
487 @requires_no_parents_change
487 @requires_no_parents_change
488 def set_untracked(self, filename):
488 def set_untracked(self, filename):
489 """a "public" method for generic code to mark a file as untracked
489 """a "public" method for generic code to mark a file as untracked
490
490
491 This function is to be called outside of "update/merge" case. For
491 This function is to be called outside of "update/merge" case. For
492 example by a command like `hg remove X`.
492 example by a command like `hg remove X`.
493
493
494 return True the file was previously tracked, False otherwise.
494 return True the file was previously tracked, False otherwise.
495 """
495 """
496 entry = self._map.get(filename)
496 entry = self._map.get(filename)
497 if entry is None:
497 if entry is None:
498 return False
498 return False
499 elif entry.added:
499 elif entry.added:
500 self._drop(filename)
500 self._drop(filename)
501 return True
501 return True
502 else:
502 else:
503 self._remove(filename)
503 self._remove(filename)
504 return True
504 return True
505
505
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        self._updatedfiles.add(filename)
        self._normal(filename, parentfiledata=parentfiledata)

    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._updatedfiles.add(filename)
        self._map.set_possibly_dirty(filename)
519
519
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            pass
        else:
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
581
581
582 @requires_parents_change
582 @requires_parents_change
583 def update_file(
583 def update_file(
584 self,
584 self,
585 filename,
585 filename,
586 wc_tracked,
586 wc_tracked,
587 p1_tracked,
587 p1_tracked,
588 p2_tracked=False,
588 p2_tracked=False,
589 merged=False,
589 merged=False,
590 clean_p1=False,
590 clean_p1=False,
591 clean_p2=False,
591 clean_p2=False,
592 possibly_dirty=False,
592 possibly_dirty=False,
593 parentfiledata=None,
593 parentfiledata=None,
594 ):
594 ):
595 """update the information about a file in the dirstate
595 """update the information about a file in the dirstate
596
596
597 This is to be called when the direstates parent changes to keep track
597 This is to be called when the direstates parent changes to keep track
598 of what is the file situation in regards to the working copy and its parent.
598 of what is the file situation in regards to the working copy and its parent.
599
599
600 This function must be called within a `dirstate.parentchange` context.
600 This function must be called within a `dirstate.parentchange` context.
601
601
602 note: the API is at an early stage and we might need to ajust it
602 note: the API is at an early stage and we might need to ajust it
603 depending of what information ends up being relevant and useful to
603 depending of what information ends up being relevant and useful to
604 other processing.
604 other processing.
605 """
605 """
606 if merged and (clean_p1 or clean_p2):
606 if merged and (clean_p1 or clean_p2):
607 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
607 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
608 raise error.ProgrammingError(msg)
608 raise error.ProgrammingError(msg)
609
609
610 # note: I do not think we need to double check name clash here since we
610 # note: I do not think we need to double check name clash here since we
611 # are in a update/merge case that should already have taken care of
611 # are in a update/merge case that should already have taken care of
612 # this. The test agrees
612 # this. The test agrees
613
613
614 self._dirty = True
614 self._dirty = True
615 self._updatedfiles.add(filename)
615 self._updatedfiles.add(filename)
616
616
617 need_parent_file_data = (
617 need_parent_file_data = (
618 not (possibly_dirty or clean_p2 or merged)
618 not (possibly_dirty or clean_p2 or merged)
619 and wc_tracked
619 and wc_tracked
620 and p1_tracked
620 and p1_tracked
621 )
621 )
622
622
623 # this mean we are doing call for file we do not really care about the
623 # this mean we are doing call for file we do not really care about the
624 # data (eg: added or removed), however this should be a minor overhead
624 # data (eg: added or removed), however this should be a minor overhead
625 # compared to the overall update process calling this.
625 # compared to the overall update process calling this.
626 if need_parent_file_data:
626 if need_parent_file_data:
627 if parentfiledata is None:
627 if parentfiledata is None:
628 parentfiledata = self._get_filedata(filename)
628 parentfiledata = self._get_filedata(filename)
629 mtime = parentfiledata[2]
629 mtime = parentfiledata[2]
630
630
631 if mtime > self._lastnormaltime:
631 if mtime > self._lastnormaltime:
632 # Remember the most recent modification timeslot for
632 # Remember the most recent modification timeslot for
633 # status(), to make sure we won't miss future
633 # status(), to make sure we won't miss future
634 # size-preserving file content modifications that happen
634 # size-preserving file content modifications that happen
635 # within the same timeslot.
635 # within the same timeslot.
636 self._lastnormaltime = mtime
636 self._lastnormaltime = mtime
637
637
638 self._map.reset_state(
638 self._map.reset_state(
639 filename,
639 filename,
640 wc_tracked,
640 wc_tracked,
641 p1_tracked,
641 p1_tracked,
642 p2_tracked=p2_tracked,
642 p2_tracked=p2_tracked,
643 merged=merged,
643 merged=merged,
644 clean_p1=clean_p1,
644 clean_p1=clean_p1,
645 clean_p2=clean_p2,
645 clean_p2=clean_p2,
646 possibly_dirty=possibly_dirty,
646 possibly_dirty=possibly_dirty,
647 parentfiledata=parentfiledata,
647 parentfiledata=parentfiledata,
648 )
648 )
649 if (
649 if (
650 parentfiledata is not None
650 parentfiledata is not None
651 and parentfiledata[2] > self._lastnormaltime
651 and parentfiledata[2] > self._lastnormaltime
652 ):
652 ):
653 # Remember the most recent modification timeslot for status(),
653 # Remember the most recent modification timeslot for status(),
654 # to make sure we won't miss future size-preserving file content
654 # to make sure we won't miss future size-preserving file content
655 # modifications that happen within the same timeslot.
655 # modifications that happen within the same timeslot.
656 self._lastnormaltime = parentfiledata[2]
656 self._lastnormaltime = parentfiledata[2]
657
657
658 def _addpath(
658 def _addpath(
659 self,
659 self,
660 f,
660 f,
661 mode=0,
661 mode=0,
662 size=None,
662 size=None,
663 mtime=None,
663 mtime=None,
664 added=False,
664 added=False,
665 merged=False,
665 merged=False,
666 from_p2=False,
666 from_p2=False,
667 possibly_dirty=False,
667 possibly_dirty=False,
668 ):
668 ):
669 entry = self._map.get(f)
669 entry = self._map.get(f)
670 if added or entry is not None and entry.removed:
670 if added or entry is not None and entry.removed:
671 scmutil.checkfilename(f)
671 scmutil.checkfilename(f)
672 if self._map.hastrackeddir(f):
672 if self._map.hastrackeddir(f):
673 msg = _(b'directory %r already in dirstate')
673 msg = _(b'directory %r already in dirstate')
674 msg %= pycompat.bytestr(f)
674 msg %= pycompat.bytestr(f)
675 raise error.Abort(msg)
675 raise error.Abort(msg)
676 # shadows
676 # shadows
677 for d in pathutil.finddirs(f):
677 for d in pathutil.finddirs(f):
678 if self._map.hastrackeddir(d):
678 if self._map.hastrackeddir(d):
679 break
679 break
680 entry = self._map.get(d)
680 entry = self._map.get(d)
681 if entry is not None and not entry.removed:
681 if entry is not None and not entry.removed:
682 msg = _(b'file %r in dirstate clashes with %r')
682 msg = _(b'file %r in dirstate clashes with %r')
683 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
683 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
684 raise error.Abort(msg)
684 raise error.Abort(msg)
685 self._dirty = True
685 self._dirty = True
686 self._updatedfiles.add(f)
686 self._updatedfiles.add(f)
687 self._map.addfile(
687 self._map.addfile(
688 f,
688 f,
689 mode=mode,
689 mode=mode,
690 size=size,
690 size=size,
691 mtime=mtime,
691 mtime=mtime,
692 added=added,
692 added=added,
693 merged=merged,
693 merged=merged,
694 from_p2=from_p2,
694 from_p2=from_p2,
695 possibly_dirty=possibly_dirty,
695 possibly_dirty=possibly_dirty,
696 )
696 )
697
697
698 def _get_filedata(self, filename):
698 def _get_filedata(self, filename):
699 """returns"""
699 """returns"""
700 s = os.lstat(self._join(filename))
700 s = os.lstat(self._join(filename))
701 mode = s.st_mode
701 mode = s.st_mode
702 size = s.st_size
702 size = s.st_size
703 mtime = s[stat.ST_MTIME]
703 mtime = s[stat.ST_MTIME]
704 return (mode, size, mtime)
704 return (mode, size, mtime)
705
705
706 def normal(self, f, parentfiledata=None):
706 def normal(self, f, parentfiledata=None):
707 """Mark a file normal and clean.
707 """Mark a file normal and clean.
708
708
709 parentfiledata: (mode, size, mtime) of the clean file
709 parentfiledata: (mode, size, mtime) of the clean file
710
710
711 parentfiledata should be computed from memory (for mode,
711 parentfiledata should be computed from memory (for mode,
712 size), as or close as possible from the point where we
712 size), as or close as possible from the point where we
713 determined the file was clean, to limit the risk of the
713 determined the file was clean, to limit the risk of the
714 file having been changed by an external process between the
714 file having been changed by an external process between the
715 moment where the file was determined to be clean and now."""
715 moment where the file was determined to be clean and now."""
716 if self.pendingparentchange():
716 if self.pendingparentchange():
717 util.nouideprecwarn(
717 util.nouideprecwarn(
718 b"do not use `normal` inside of update/merge context."
718 b"do not use `normal` inside of update/merge context."
719 b" Use `update_file` or `update_file_p1`",
719 b" Use `update_file` or `update_file_p1`",
720 b'6.0',
720 b'6.0',
721 stacklevel=2,
721 stacklevel=2,
722 )
722 )
723 else:
723 else:
724 util.nouideprecwarn(
724 util.nouideprecwarn(
725 b"do not use `normal` outside of update/merge context."
725 b"do not use `normal` outside of update/merge context."
726 b" Use `set_tracked`",
726 b" Use `set_tracked`",
727 b'6.0',
727 b'6.0',
728 stacklevel=2,
728 stacklevel=2,
729 )
729 )
730 self._normal(f, parentfiledata=parentfiledata)
730 self._normal(f, parentfiledata=parentfiledata)
731
731
732 def _normal(self, f, parentfiledata=None):
732 def _normal(self, f, parentfiledata=None):
733 if parentfiledata:
733 if parentfiledata:
734 (mode, size, mtime) = parentfiledata
734 (mode, size, mtime) = parentfiledata
735 else:
735 else:
736 (mode, size, mtime) = self._get_filedata(f)
736 (mode, size, mtime) = self._get_filedata(f)
737 self._addpath(f, mode=mode, size=size, mtime=mtime)
737 self._addpath(f, mode=mode, size=size, mtime=mtime)
738 self._map.copymap.pop(f, None)
738 self._map.copymap.pop(f, None)
739 if f in self._map.nonnormalset:
739 if f in self._map.nonnormalset:
740 self._map.nonnormalset.remove(f)
740 self._map.nonnormalset.remove(f)
741 if mtime > self._lastnormaltime:
741 if mtime > self._lastnormaltime:
742 # Remember the most recent modification timeslot for status(),
742 # Remember the most recent modification timeslot for status(),
743 # to make sure we won't miss future size-preserving file content
743 # to make sure we won't miss future size-preserving file content
744 # modifications that happen within the same timeslot.
744 # modifications that happen within the same timeslot.
745 self._lastnormaltime = mtime
745 self._lastnormaltime = mtime
746
746
747 def normallookup(self, f):
747 def normallookup(self, f):
748 '''Mark a file normal, but possibly dirty.'''
748 '''Mark a file normal, but possibly dirty.'''
749 if self.pendingparentchange():
749 if self.pendingparentchange():
750 util.nouideprecwarn(
750 util.nouideprecwarn(
751 b"do not use `normallookup` inside of update/merge context."
751 b"do not use `normallookup` inside of update/merge context."
752 b" Use `update_file` or `update_file_p1`",
752 b" Use `update_file` or `update_file_p1`",
753 b'6.0',
753 b'6.0',
754 stacklevel=2,
754 stacklevel=2,
755 )
755 )
756 else:
756 else:
757 util.nouideprecwarn(
757 util.nouideprecwarn(
758 b"do not use `normallookup` outside of update/merge context."
758 b"do not use `normallookup` outside of update/merge context."
759 b" Use `set_possibly_dirty` or `set_tracked`",
759 b" Use `set_possibly_dirty` or `set_tracked`",
760 b'6.0',
760 b'6.0',
761 stacklevel=2,
761 stacklevel=2,
762 )
762 )
763 self._normallookup(f)
763 self._normallookup(f)
764
764
765 def _normallookup(self, f):
765 def _normallookup(self, f):
766 '''Mark a file normal, but possibly dirty.'''
766 '''Mark a file normal, but possibly dirty.'''
767 if self.in_merge:
767 if self.in_merge:
768 # if there is a merge going on and the file was either
768 # if there is a merge going on and the file was either
769 # "merged" or coming from other parent (-2) before
769 # "merged" or coming from other parent (-2) before
770 # being removed, restore that state.
770 # being removed, restore that state.
771 entry = self._map.get(f)
771 entry = self._map.get(f)
772 if entry is not None:
772 if entry is not None:
773 # XXX this should probably be dealt with a a lower level
773 # XXX this should probably be dealt with a a lower level
774 # (see `merged_removed` and `from_p2_removed`)
774 # (see `merged_removed` and `from_p2_removed`)
775 if entry.merged_removed or entry.from_p2_removed:
775 if entry.merged_removed or entry.from_p2_removed:
776 source = self._map.copymap.get(f)
776 source = self._map.copymap.get(f)
777 if entry.merged_removed:
777 if entry.merged_removed:
778 self.merge(f)
778 self._merge(f)
779 elif entry.from_p2_removed:
779 elif entry.from_p2_removed:
780 self._otherparent(f)
780 self._otherparent(f)
781 if source is not None:
781 if source is not None:
782 self.copy(source, f)
782 self.copy(source, f)
783 return
783 return
784 elif entry.merged or entry.from_p2:
784 elif entry.merged or entry.from_p2:
785 return
785 return
786 self._addpath(f, possibly_dirty=True)
786 self._addpath(f, possibly_dirty=True)
787 self._map.copymap.pop(f, None)
787 self._map.copymap.pop(f, None)
788
788
789 def otherparent(self, f):
789 def otherparent(self, f):
790 '''Mark as coming from the other parent, always dirty.'''
790 '''Mark as coming from the other parent, always dirty.'''
791 if self.pendingparentchange():
791 if self.pendingparentchange():
792 util.nouideprecwarn(
792 util.nouideprecwarn(
793 b"do not use `otherparent` inside of update/merge context."
793 b"do not use `otherparent` inside of update/merge context."
794 b" Use `update_file` or `update_file_p1`",
794 b" Use `update_file` or `update_file_p1`",
795 b'6.0',
795 b'6.0',
796 stacklevel=2,
796 stacklevel=2,
797 )
797 )
798 else:
798 else:
799 util.nouideprecwarn(
799 util.nouideprecwarn(
800 b"do not use `otherparent` outside of update/merge context."
800 b"do not use `otherparent` outside of update/merge context."
801 b"It should have been set by the update/merge code",
801 b"It should have been set by the update/merge code",
802 b'6.0',
802 b'6.0',
803 stacklevel=2,
803 stacklevel=2,
804 )
804 )
805 self._otherparent(f)
805 self._otherparent(f)
806
806
807 def _otherparent(self, f):
807 def _otherparent(self, f):
808 if not self.in_merge:
808 if not self.in_merge:
809 msg = _(b"setting %r to other parent only allowed in merges") % f
809 msg = _(b"setting %r to other parent only allowed in merges") % f
810 raise error.Abort(msg)
810 raise error.Abort(msg)
811 entry = self._map.get(f)
811 entry = self._map.get(f)
812 if entry is not None and entry.tracked:
812 if entry is not None and entry.tracked:
813 # merge-like
813 # merge-like
814 self._addpath(f, merged=True)
814 self._addpath(f, merged=True)
815 else:
815 else:
816 # add-like
816 # add-like
817 self._addpath(f, from_p2=True)
817 self._addpath(f, from_p2=True)
818 self._map.copymap.pop(f, None)
818 self._map.copymap.pop(f, None)
819
819
820 def add(self, f):
820 def add(self, f):
821 '''Mark a file added.'''
821 '''Mark a file added.'''
822 if not self.pendingparentchange():
822 if not self.pendingparentchange():
823 util.nouideprecwarn(
823 util.nouideprecwarn(
824 b"do not use `add` outside of update/merge context."
824 b"do not use `add` outside of update/merge context."
825 b" Use `set_tracked`",
825 b" Use `set_tracked`",
826 b'6.0',
826 b'6.0',
827 stacklevel=2,
827 stacklevel=2,
828 )
828 )
829 self._add(f)
829 self._add(f)
830
830
831 def _add(self, filename):
831 def _add(self, filename):
832 """internal function to mark a file as added"""
832 """internal function to mark a file as added"""
833 self._addpath(filename, added=True)
833 self._addpath(filename, added=True)
834 self._map.copymap.pop(filename, None)
834 self._map.copymap.pop(filename, None)
835
835
836 def remove(self, f):
836 def remove(self, f):
837 '''Mark a file removed'''
837 '''Mark a file removed'''
838 if self.pendingparentchange():
838 if self.pendingparentchange():
839 util.nouideprecwarn(
839 util.nouideprecwarn(
840 b"do not use `remove` insde of update/merge context."
840 b"do not use `remove` insde of update/merge context."
841 b" Use `update_file` or `update_file_p1`",
841 b" Use `update_file` or `update_file_p1`",
842 b'6.0',
842 b'6.0',
843 stacklevel=2,
843 stacklevel=2,
844 )
844 )
845 else:
845 else:
846 util.nouideprecwarn(
846 util.nouideprecwarn(
847 b"do not use `remove` outside of update/merge context."
847 b"do not use `remove` outside of update/merge context."
848 b" Use `set_untracked`",
848 b" Use `set_untracked`",
849 b'6.0',
849 b'6.0',
850 stacklevel=2,
850 stacklevel=2,
851 )
851 )
852 self._remove(f)
852 self._remove(f)
853
853
854 def _remove(self, filename):
854 def _remove(self, filename):
855 """internal function to mark a file removed"""
855 """internal function to mark a file removed"""
856 self._dirty = True
856 self._dirty = True
857 self._updatedfiles.add(filename)
857 self._updatedfiles.add(filename)
858 self._map.removefile(filename, in_merge=self.in_merge)
858 self._map.removefile(filename, in_merge=self.in_merge)
859
859
860 def merge(self, f):
860 def merge(self, f):
861 '''Mark a file merged.'''
861 '''Mark a file merged.'''
862 if self.pendingparentchange():
863 util.nouideprecwarn(
864 b"do not use `merge` inside of update/merge context."
865 b" Use `update_file`",
866 b'6.0',
867 stacklevel=2,
868 )
869 else:
870 util.nouideprecwarn(
871 b"do not use `merge` outside of update/merge context."
872 b"It should have been set by the update/merge code",
873 b'6.0',
874 stacklevel=2,
875 )
876 self._merge(f)
877
878 def _merge(self, f):
862 if not self.in_merge:
879 if not self.in_merge:
863 return self._normallookup(f)
880 return self._normallookup(f)
864 return self._otherparent(f)
881 return self._otherparent(f)
865
882
866 def drop(self, f):
883 def drop(self, f):
867 '''Drop a file from the dirstate'''
884 '''Drop a file from the dirstate'''
868 if not self.pendingparentchange():
885 if not self.pendingparentchange():
869 util.nouideprecwarn(
886 util.nouideprecwarn(
870 b"do not use `drop` outside of update/merge context."
887 b"do not use `drop` outside of update/merge context."
871 b" Use `set_untracked`",
888 b" Use `set_untracked`",
872 b'6.0',
889 b'6.0',
873 stacklevel=2,
890 stacklevel=2,
874 )
891 )
875 self._drop(f)
892 self._drop(f)
876
893
877 def _drop(self, filename):
894 def _drop(self, filename):
878 """internal function to drop a file from the dirstate"""
895 """internal function to drop a file from the dirstate"""
879 if self._map.dropfile(filename):
896 if self._map.dropfile(filename):
880 self._dirty = True
897 self._dirty = True
881 self._updatedfiles.add(filename)
898 self._updatedfiles.add(filename)
882 self._map.copymap.pop(filename, None)
899 self._map.copymap.pop(filename, None)
883
900
884 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
901 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
885 if exists is None:
902 if exists is None:
886 exists = os.path.lexists(os.path.join(self._root, path))
903 exists = os.path.lexists(os.path.join(self._root, path))
887 if not exists:
904 if not exists:
888 # Maybe a path component exists
905 # Maybe a path component exists
889 if not ignoremissing and b'/' in path:
906 if not ignoremissing and b'/' in path:
890 d, f = path.rsplit(b'/', 1)
907 d, f = path.rsplit(b'/', 1)
891 d = self._normalize(d, False, ignoremissing, None)
908 d = self._normalize(d, False, ignoremissing, None)
892 folded = d + b"/" + f
909 folded = d + b"/" + f
893 else:
910 else:
894 # No path components, preserve original case
911 # No path components, preserve original case
895 folded = path
912 folded = path
896 else:
913 else:
897 # recursively normalize leading directory components
914 # recursively normalize leading directory components
898 # against dirstate
915 # against dirstate
899 if b'/' in normed:
916 if b'/' in normed:
900 d, f = normed.rsplit(b'/', 1)
917 d, f = normed.rsplit(b'/', 1)
901 d = self._normalize(d, False, ignoremissing, True)
918 d = self._normalize(d, False, ignoremissing, True)
902 r = self._root + b"/" + d
919 r = self._root + b"/" + d
903 folded = d + b"/" + util.fspath(f, r)
920 folded = d + b"/" + util.fspath(f, r)
904 else:
921 else:
905 folded = util.fspath(normed, self._root)
922 folded = util.fspath(normed, self._root)
906 storemap[normed] = folded
923 storemap[normed] = folded
907
924
908 return folded
925 return folded
909
926
910 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
927 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
911 normed = util.normcase(path)
928 normed = util.normcase(path)
912 folded = self._map.filefoldmap.get(normed, None)
929 folded = self._map.filefoldmap.get(normed, None)
913 if folded is None:
930 if folded is None:
914 if isknown:
931 if isknown:
915 folded = path
932 folded = path
916 else:
933 else:
917 folded = self._discoverpath(
934 folded = self._discoverpath(
918 path, normed, ignoremissing, exists, self._map.filefoldmap
935 path, normed, ignoremissing, exists, self._map.filefoldmap
919 )
936 )
920 return folded
937 return folded
921
938
922 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
939 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
923 normed = util.normcase(path)
940 normed = util.normcase(path)
924 folded = self._map.filefoldmap.get(normed, None)
941 folded = self._map.filefoldmap.get(normed, None)
925 if folded is None:
942 if folded is None:
926 folded = self._map.dirfoldmap.get(normed, None)
943 folded = self._map.dirfoldmap.get(normed, None)
927 if folded is None:
944 if folded is None:
928 if isknown:
945 if isknown:
929 folded = path
946 folded = path
930 else:
947 else:
931 # store discovered result in dirfoldmap so that future
948 # store discovered result in dirfoldmap so that future
932 # normalizefile calls don't start matching directories
949 # normalizefile calls don't start matching directories
933 folded = self._discoverpath(
950 folded = self._discoverpath(
934 path, normed, ignoremissing, exists, self._map.dirfoldmap
951 path, normed, ignoremissing, exists, self._map.dirfoldmap
935 )
952 )
936 return folded
953 return folded
937
954
938 def normalize(self, path, isknown=False, ignoremissing=False):
955 def normalize(self, path, isknown=False, ignoremissing=False):
939 """
956 """
940 normalize the case of a pathname when on a casefolding filesystem
957 normalize the case of a pathname when on a casefolding filesystem
941
958
942 isknown specifies whether the filename came from walking the
959 isknown specifies whether the filename came from walking the
943 disk, to avoid extra filesystem access.
960 disk, to avoid extra filesystem access.
944
961
945 If ignoremissing is True, missing path are returned
962 If ignoremissing is True, missing path are returned
946 unchanged. Otherwise, we try harder to normalize possibly
963 unchanged. Otherwise, we try harder to normalize possibly
947 existing path components.
964 existing path components.
948
965
949 The normalized case is determined based on the following precedence:
966 The normalized case is determined based on the following precedence:
950
967
951 - version of name already stored in the dirstate
968 - version of name already stored in the dirstate
952 - version of name stored on disk
969 - version of name stored on disk
953 - version provided via command arguments
970 - version provided via command arguments
954 """
971 """
955
972
956 if self._checkcase:
973 if self._checkcase:
957 return self._normalize(path, isknown, ignoremissing)
974 return self._normalize(path, isknown, ignoremissing)
958 return path
975 return path
959
976
960 def clear(self):
977 def clear(self):
961 self._map.clear()
978 self._map.clear()
962 self._lastnormaltime = 0
979 self._lastnormaltime = 0
963 self._updatedfiles.clear()
980 self._updatedfiles.clear()
964 self._dirty = True
981 self._dirty = True
965
982
966 def rebuild(self, parent, allfiles, changedfiles=None):
983 def rebuild(self, parent, allfiles, changedfiles=None):
967 if changedfiles is None:
984 if changedfiles is None:
968 # Rebuild entire dirstate
985 # Rebuild entire dirstate
969 to_lookup = allfiles
986 to_lookup = allfiles
970 to_drop = []
987 to_drop = []
971 lastnormaltime = self._lastnormaltime
988 lastnormaltime = self._lastnormaltime
972 self.clear()
989 self.clear()
973 self._lastnormaltime = lastnormaltime
990 self._lastnormaltime = lastnormaltime
974 elif len(changedfiles) < 10:
991 elif len(changedfiles) < 10:
975 # Avoid turning allfiles into a set, which can be expensive if it's
992 # Avoid turning allfiles into a set, which can be expensive if it's
976 # large.
993 # large.
977 to_lookup = []
994 to_lookup = []
978 to_drop = []
995 to_drop = []
979 for f in changedfiles:
996 for f in changedfiles:
980 if f in allfiles:
997 if f in allfiles:
981 to_lookup.append(f)
998 to_lookup.append(f)
982 else:
999 else:
983 to_drop.append(f)
1000 to_drop.append(f)
984 else:
1001 else:
985 changedfilesset = set(changedfiles)
1002 changedfilesset = set(changedfiles)
986 to_lookup = changedfilesset & set(allfiles)
1003 to_lookup = changedfilesset & set(allfiles)
987 to_drop = changedfilesset - to_lookup
1004 to_drop = changedfilesset - to_lookup
988
1005
989 if self._origpl is None:
1006 if self._origpl is None:
990 self._origpl = self._pl
1007 self._origpl = self._pl
991 self._map.setparents(parent, self._nodeconstants.nullid)
1008 self._map.setparents(parent, self._nodeconstants.nullid)
992
1009
993 for f in to_lookup:
1010 for f in to_lookup:
994 self._normallookup(f)
1011 self._normallookup(f)
995 for f in to_drop:
1012 for f in to_drop:
996 self._drop(f)
1013 self._drop(f)
997
1014
998 self._dirty = True
1015 self._dirty = True
999
1016
1000 def identity(self):
1017 def identity(self):
1001 """Return identity of dirstate itself to detect changing in storage
1018 """Return identity of dirstate itself to detect changing in storage
1002
1019
1003 If identity of previous dirstate is equal to this, writing
1020 If identity of previous dirstate is equal to this, writing
1004 changes based on the former dirstate out can keep consistency.
1021 changes based on the former dirstate out can keep consistency.
1005 """
1022 """
1006 return self._map.identity
1023 return self._map.identity
1007
1024
1008 def write(self, tr):
1025 def write(self, tr):
1009 if not self._dirty:
1026 if not self._dirty:
1010 return
1027 return
1011
1028
1012 filename = self._filename
1029 filename = self._filename
1013 if tr:
1030 if tr:
1014 # 'dirstate.write()' is not only for writing in-memory
1031 # 'dirstate.write()' is not only for writing in-memory
1015 # changes out, but also for dropping ambiguous timestamp.
1032 # changes out, but also for dropping ambiguous timestamp.
1016 # delayed writing re-raise "ambiguous timestamp issue".
1033 # delayed writing re-raise "ambiguous timestamp issue".
1017 # See also the wiki page below for detail:
1034 # See also the wiki page below for detail:
1018 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
1035 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
1019
1036
1020 # emulate dropping timestamp in 'parsers.pack_dirstate'
1037 # emulate dropping timestamp in 'parsers.pack_dirstate'
1021 now = _getfsnow(self._opener)
1038 now = _getfsnow(self._opener)
1022 self._map.clearambiguoustimes(self._updatedfiles, now)
1039 self._map.clearambiguoustimes(self._updatedfiles, now)
1023
1040
1024 # emulate that all 'dirstate.normal' results are written out
1041 # emulate that all 'dirstate.normal' results are written out
1025 self._lastnormaltime = 0
1042 self._lastnormaltime = 0
1026 self._updatedfiles.clear()
1043 self._updatedfiles.clear()
1027
1044
1028 # delay writing in-memory changes out
1045 # delay writing in-memory changes out
1029 tr.addfilegenerator(
1046 tr.addfilegenerator(
1030 b'dirstate',
1047 b'dirstate',
1031 (self._filename,),
1048 (self._filename,),
1032 lambda f: self._writedirstate(tr, f),
1049 lambda f: self._writedirstate(tr, f),
1033 location=b'plain',
1050 location=b'plain',
1034 )
1051 )
1035 return
1052 return
1036
1053
1037 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
1054 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
1038 self._writedirstate(tr, st)
1055 self._writedirstate(tr, st)
1039
1056
1040 def addparentchangecallback(self, category, callback):
1057 def addparentchangecallback(self, category, callback):
1041 """add a callback to be called when the wd parents are changed
1058 """add a callback to be called when the wd parents are changed
1042
1059
1043 Callback will be called with the following arguments:
1060 Callback will be called with the following arguments:
1044 dirstate, (oldp1, oldp2), (newp1, newp2)
1061 dirstate, (oldp1, oldp2), (newp1, newp2)
1045
1062
1046 Category is a unique identifier to allow overwriting an old callback
1063 Category is a unique identifier to allow overwriting an old callback
1047 with a newer callback.
1064 with a newer callback.
1048 """
1065 """
1049 self._plchangecallbacks[category] = callback
1066 self._plchangecallbacks[category] = callback
1050
1067
1051 def _writedirstate(self, tr, st):
1068 def _writedirstate(self, tr, st):
1052 # notify callbacks about parents change
1069 # notify callbacks about parents change
1053 if self._origpl is not None and self._origpl != self._pl:
1070 if self._origpl is not None and self._origpl != self._pl:
1054 for c, callback in sorted(
1071 for c, callback in sorted(
1055 pycompat.iteritems(self._plchangecallbacks)
1072 pycompat.iteritems(self._plchangecallbacks)
1056 ):
1073 ):
1057 callback(self, self._origpl, self._pl)
1074 callback(self, self._origpl, self._pl)
1058 self._origpl = None
1075 self._origpl = None
1059 # use the modification time of the newly created temporary file as the
1076 # use the modification time of the newly created temporary file as the
1060 # filesystem's notion of 'now'
1077 # filesystem's notion of 'now'
1061 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
1078 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
1062
1079
1063 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1080 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1064 # timestamp of each entries in dirstate, because of 'now > mtime'
1081 # timestamp of each entries in dirstate, because of 'now > mtime'
1065 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
1082 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
1066 if delaywrite > 0:
1083 if delaywrite > 0:
1067 # do we have any files to delay for?
1084 # do we have any files to delay for?
1068 for f, e in pycompat.iteritems(self._map):
1085 for f, e in pycompat.iteritems(self._map):
1069 if e.need_delay(now):
1086 if e.need_delay(now):
1070 import time # to avoid useless import
1087 import time # to avoid useless import
1071
1088
1072 # rather than sleep n seconds, sleep until the next
1089 # rather than sleep n seconds, sleep until the next
1073 # multiple of n seconds
1090 # multiple of n seconds
1074 clock = time.time()
1091 clock = time.time()
1075 start = int(clock) - (int(clock) % delaywrite)
1092 start = int(clock) - (int(clock) % delaywrite)
1076 end = start + delaywrite
1093 end = start + delaywrite
1077 time.sleep(end - clock)
1094 time.sleep(end - clock)
1078 now = end # trust our estimate that the end is near now
1095 now = end # trust our estimate that the end is near now
1079 break
1096 break
1080
1097
1081 self._map.write(tr, st, now)
1098 self._map.write(tr, st, now)
1082 self._lastnormaltime = 0
1099 self._lastnormaltime = 0
1083 self._dirty = False
1100 self._dirty = False
1084
1101
1085 def _dirignore(self, f):
1102 def _dirignore(self, f):
1086 if self._ignore(f):
1103 if self._ignore(f):
1087 return True
1104 return True
1088 for p in pathutil.finddirs(f):
1105 for p in pathutil.finddirs(f):
1089 if self._ignore(p):
1106 if self._ignore(p):
1090 return True
1107 return True
1091 return False
1108 return False
1092
1109
1093 def _ignorefiles(self):
1110 def _ignorefiles(self):
1094 files = []
1111 files = []
1095 if os.path.exists(self._join(b'.hgignore')):
1112 if os.path.exists(self._join(b'.hgignore')):
1096 files.append(self._join(b'.hgignore'))
1113 files.append(self._join(b'.hgignore'))
1097 for name, path in self._ui.configitems(b"ui"):
1114 for name, path in self._ui.configitems(b"ui"):
1098 if name == b'ignore' or name.startswith(b'ignore.'):
1115 if name == b'ignore' or name.startswith(b'ignore.'):
1099 # we need to use os.path.join here rather than self._join
1116 # we need to use os.path.join here rather than self._join
1100 # because path is arbitrary and user-specified
1117 # because path is arbitrary and user-specified
1101 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1118 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1102 return files
1119 return files
1103
1120
1104 def _ignorefileandline(self, f):
1121 def _ignorefileandline(self, f):
1105 files = collections.deque(self._ignorefiles())
1122 files = collections.deque(self._ignorefiles())
1106 visited = set()
1123 visited = set()
1107 while files:
1124 while files:
1108 i = files.popleft()
1125 i = files.popleft()
1109 patterns = matchmod.readpatternfile(
1126 patterns = matchmod.readpatternfile(
1110 i, self._ui.warn, sourceinfo=True
1127 i, self._ui.warn, sourceinfo=True
1111 )
1128 )
1112 for pattern, lineno, line in patterns:
1129 for pattern, lineno, line in patterns:
1113 kind, p = matchmod._patsplit(pattern, b'glob')
1130 kind, p = matchmod._patsplit(pattern, b'glob')
1114 if kind == b"subinclude":
1131 if kind == b"subinclude":
1115 if p not in visited:
1132 if p not in visited:
1116 files.append(p)
1133 files.append(p)
1117 continue
1134 continue
1118 m = matchmod.match(
1135 m = matchmod.match(
1119 self._root, b'', [], [pattern], warn=self._ui.warn
1136 self._root, b'', [], [pattern], warn=self._ui.warn
1120 )
1137 )
1121 if m(f):
1138 if m(f):
1122 return (i, lineno, line)
1139 return (i, lineno, line)
1123 visited.add(i)
1140 visited.add(i)
1124 return (None, -1, b"")
1141 return (None, -1, b"")
1125
1142
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # translate an unsupported st_mode file type into a human-readable
            # message for match.bad()
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently-used callables to locals for speed in the loop below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        # case-normalization is only needed on case-insensitive filesystems,
        # and exact matchers are assumed to already use the right case
        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo: the subrepo is
        # responsible for walking them (files and subrepos are both sorted
        # so a single merge-style pass suffices)
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # seed results with sentinels so the walk never recurses into
        # subrepos or the .hg directory
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group the found files by their case-normalized form
            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            # where several names collapse to one, keep the stat only for the
            # spelling that actually exists on disk
            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1260
1277
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # select ignore predicates according to which file classes the
        # caller wants reported
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # bind hot attributes/callables to locals for the traversal loop
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        # case-normalization is needed on case-insensitive filesystems,
        # except for exact matchers which are assumed pre-normalized
        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # iterative depth-first traversal; `work` is a stack of
            # directories still to list, mutated in place
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    # never descend into nested repositories
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        # report unreadable/vanished dirs via match.bad and
                        # keep walking the rest
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                # dirstate entry shadowed by a directory on
                                # disk: report as missing (None)
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # unsupported file type (fifo, socket, ...) that
                            # is still tracked: report as missing
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # drop the sentinels seeded by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1448
1465
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status via the Rust fast path.

        Returns the same ``(lookup, status)`` pair as the pure-Python
        implementation in ``status()``.  May raise
        ``rustmod.FallbackError`` (handled by the caller) when the Rust
        code cannot handle the request.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            # force single-threaded operation in the Rust code
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust walk may itself have updated the dirstate map
        self._dirty |= dirty

        # replay directory-traversal callbacks collected on the Rust side
        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax): invalid syntax in a pattern file
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare item: a pattern file that could not be read
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        # forward per-file errors to the matcher's bad-file callback
        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1527
1544
1528 def status(self, match, subrepos, ignored, clean, unknown):
1545 def status(self, match, subrepos, ignored, clean, unknown):
1529 """Determine the status of the working copy relative to the
1546 """Determine the status of the working copy relative to the
1530 dirstate and return a pair of (unsure, status), where status is of type
1547 dirstate and return a pair of (unsure, status), where status is of type
1531 scmutil.status and:
1548 scmutil.status and:
1532
1549
1533 unsure:
1550 unsure:
1534 files that might have been modified since the dirstate was
1551 files that might have been modified since the dirstate was
1535 written, but need to be read to be sure (size is the same
1552 written, but need to be read to be sure (size is the same
1536 but mtime differs)
1553 but mtime differs)
1537 status.modified:
1554 status.modified:
1538 files that have definitely been modified since the dirstate
1555 files that have definitely been modified since the dirstate
1539 was written (different size or mode)
1556 was written (different size or mode)
1540 status.clean:
1557 status.clean:
1541 files that have definitely not been modified since the
1558 files that have definitely not been modified since the
1542 dirstate was written
1559 dirstate was written
1543 """
1560 """
1544 listignored, listclean, listunknown = ignored, clean, unknown
1561 listignored, listclean, listunknown = ignored, clean, unknown
1545 lookup, modified, added, unknown, ignored = [], [], [], [], []
1562 lookup, modified, added, unknown, ignored = [], [], [], [], []
1546 removed, deleted, clean = [], [], []
1563 removed, deleted, clean = [], [], []
1547
1564
1548 dmap = self._map
1565 dmap = self._map
1549 dmap.preload()
1566 dmap.preload()
1550
1567
1551 use_rust = True
1568 use_rust = True
1552
1569
1553 allowed_matchers = (
1570 allowed_matchers = (
1554 matchmod.alwaysmatcher,
1571 matchmod.alwaysmatcher,
1555 matchmod.exactmatcher,
1572 matchmod.exactmatcher,
1556 matchmod.includematcher,
1573 matchmod.includematcher,
1557 )
1574 )
1558
1575
1559 if rustmod is None:
1576 if rustmod is None:
1560 use_rust = False
1577 use_rust = False
1561 elif self._checkcase:
1578 elif self._checkcase:
1562 # Case-insensitive filesystems are not handled yet
1579 # Case-insensitive filesystems are not handled yet
1563 use_rust = False
1580 use_rust = False
1564 elif subrepos:
1581 elif subrepos:
1565 use_rust = False
1582 use_rust = False
1566 elif sparse.enabled:
1583 elif sparse.enabled:
1567 use_rust = False
1584 use_rust = False
1568 elif not isinstance(match, allowed_matchers):
1585 elif not isinstance(match, allowed_matchers):
1569 # Some matchers have yet to be implemented
1586 # Some matchers have yet to be implemented
1570 use_rust = False
1587 use_rust = False
1571
1588
1572 if use_rust:
1589 if use_rust:
1573 try:
1590 try:
1574 return self._rust_status(
1591 return self._rust_status(
1575 match, listclean, listignored, listunknown
1592 match, listclean, listignored, listunknown
1576 )
1593 )
1577 except rustmod.FallbackError:
1594 except rustmod.FallbackError:
1578 pass
1595 pass
1579
1596
1580 def noop(f):
1597 def noop(f):
1581 pass
1598 pass
1582
1599
1583 dcontains = dmap.__contains__
1600 dcontains = dmap.__contains__
1584 dget = dmap.__getitem__
1601 dget = dmap.__getitem__
1585 ladd = lookup.append # aka "unsure"
1602 ladd = lookup.append # aka "unsure"
1586 madd = modified.append
1603 madd = modified.append
1587 aadd = added.append
1604 aadd = added.append
1588 uadd = unknown.append if listunknown else noop
1605 uadd = unknown.append if listunknown else noop
1589 iadd = ignored.append if listignored else noop
1606 iadd = ignored.append if listignored else noop
1590 radd = removed.append
1607 radd = removed.append
1591 dadd = deleted.append
1608 dadd = deleted.append
1592 cadd = clean.append if listclean else noop
1609 cadd = clean.append if listclean else noop
1593 mexact = match.exact
1610 mexact = match.exact
1594 dirignore = self._dirignore
1611 dirignore = self._dirignore
1595 checkexec = self._checkexec
1612 checkexec = self._checkexec
1596 copymap = self._map.copymap
1613 copymap = self._map.copymap
1597 lastnormaltime = self._lastnormaltime
1614 lastnormaltime = self._lastnormaltime
1598
1615
1599 # We need to do full walks when either
1616 # We need to do full walks when either
1600 # - we're listing all clean files, or
1617 # - we're listing all clean files, or
1601 # - match.traversedir does something, because match.traversedir should
1618 # - match.traversedir does something, because match.traversedir should
1602 # be called for every dir in the working dir
1619 # be called for every dir in the working dir
1603 full = listclean or match.traversedir is not None
1620 full = listclean or match.traversedir is not None
1604 for fn, st in pycompat.iteritems(
1621 for fn, st in pycompat.iteritems(
1605 self.walk(match, subrepos, listunknown, listignored, full=full)
1622 self.walk(match, subrepos, listunknown, listignored, full=full)
1606 ):
1623 ):
1607 if not dcontains(fn):
1624 if not dcontains(fn):
1608 if (listignored or mexact(fn)) and dirignore(fn):
1625 if (listignored or mexact(fn)) and dirignore(fn):
1609 if listignored:
1626 if listignored:
1610 iadd(fn)
1627 iadd(fn)
1611 else:
1628 else:
1612 uadd(fn)
1629 uadd(fn)
1613 continue
1630 continue
1614
1631
1615 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1632 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1616 # written like that for performance reasons. dmap[fn] is not a
1633 # written like that for performance reasons. dmap[fn] is not a
1617 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1634 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1618 # opcode has fast paths when the value to be unpacked is a tuple or
1635 # opcode has fast paths when the value to be unpacked is a tuple or
1619 # a list, but falls back to creating a full-fledged iterator in
1636 # a list, but falls back to creating a full-fledged iterator in
1620 # general. That is much slower than simply accessing and storing the
1637 # general. That is much slower than simply accessing and storing the
1621 # tuple members one by one.
1638 # tuple members one by one.
1622 t = dget(fn)
1639 t = dget(fn)
1623 mode = t.mode
1640 mode = t.mode
1624 size = t.size
1641 size = t.size
1625 time = t.mtime
1642 time = t.mtime
1626
1643
1627 if not st and t.tracked:
1644 if not st and t.tracked:
1628 dadd(fn)
1645 dadd(fn)
1629 elif t.merged:
1646 elif t.merged:
1630 madd(fn)
1647 madd(fn)
1631 elif t.added:
1648 elif t.added:
1632 aadd(fn)
1649 aadd(fn)
1633 elif t.removed:
1650 elif t.removed:
1634 radd(fn)
1651 radd(fn)
1635 elif t.tracked:
1652 elif t.tracked:
1636 if (
1653 if (
1637 size >= 0
1654 size >= 0
1638 and (
1655 and (
1639 (size != st.st_size and size != st.st_size & _rangemask)
1656 (size != st.st_size and size != st.st_size & _rangemask)
1640 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1657 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1641 )
1658 )
1642 or t.from_p2
1659 or t.from_p2
1643 or fn in copymap
1660 or fn in copymap
1644 ):
1661 ):
1645 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1662 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1646 # issue6456: Size returned may be longer due to
1663 # issue6456: Size returned may be longer due to
1647 # encryption on EXT-4 fscrypt, undecided.
1664 # encryption on EXT-4 fscrypt, undecided.
1648 ladd(fn)
1665 ladd(fn)
1649 else:
1666 else:
1650 madd(fn)
1667 madd(fn)
1651 elif (
1668 elif (
1652 time != st[stat.ST_MTIME]
1669 time != st[stat.ST_MTIME]
1653 and time != st[stat.ST_MTIME] & _rangemask
1670 and time != st[stat.ST_MTIME] & _rangemask
1654 ):
1671 ):
1655 ladd(fn)
1672 ladd(fn)
1656 elif st[stat.ST_MTIME] == lastnormaltime:
1673 elif st[stat.ST_MTIME] == lastnormaltime:
1657 # fn may have just been marked as normal and it may have
1674 # fn may have just been marked as normal and it may have
1658 # changed in the same second without changing its size.
1675 # changed in the same second without changing its size.
1659 # This can happen if we quickly do multiple commits.
1676 # This can happen if we quickly do multiple commits.
1660 # Force lookup, so we don't miss such a racy file change.
1677 # Force lookup, so we don't miss such a racy file change.
1661 ladd(fn)
1678 ladd(fn)
1662 elif listclean:
1679 elif listclean:
1663 cadd(fn)
1680 cadd(fn)
1664 status = scmutil.status(
1681 status = scmutil.status(
1665 modified, added, removed, deleted, unknown, ignored, clean
1682 modified, added, removed, deleted, unknown, ignored, clean
1666 )
1683 )
1667 return (lookup, status)
1684 return (lookup, status)
1668
1685
1669 def matches(self, match):
1686 def matches(self, match):
1670 """
1687 """
1671 return files in the dirstate (in whatever state) filtered by match
1688 return files in the dirstate (in whatever state) filtered by match
1672 """
1689 """
1673 dmap = self._map
1690 dmap = self._map
1674 if rustmod is not None:
1691 if rustmod is not None:
1675 dmap = self._map._rustmap
1692 dmap = self._map._rustmap
1676
1693
1677 if match.always():
1694 if match.always():
1678 return dmap.keys()
1695 return dmap.keys()
1679 files = match.files()
1696 files = match.files()
1680 if match.isexact():
1697 if match.isexact():
1681 # fast path -- filter the other way around, since typically files is
1698 # fast path -- filter the other way around, since typically files is
1682 # much smaller than dmap
1699 # much smaller than dmap
1683 return [f for f in files if f in dmap]
1700 return [f for f in files if f in dmap]
1684 if match.prefix() and all(fn in dmap for fn in files):
1701 if match.prefix() and all(fn in dmap for fn in files):
1685 # fast path -- all the values are known to be files, so just return
1702 # fast path -- all the values are known to be files, so just return
1686 # that
1703 # that
1687 return list(files)
1704 return list(files)
1688 return [f for f in dmap if match(f)]
1705 return [f for f in dmap if match(f)]
1689
1706
1690 def _actualfilename(self, tr):
1707 def _actualfilename(self, tr):
1691 if tr:
1708 if tr:
1692 return self._pendingfilename
1709 return self._pendingfilename
1693 else:
1710 else:
1694 return self._filename
1711 return self._filename
1695
1712
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` is the active transaction (or None); ``backupname`` is the
        vfs-relative name the backup is written to.  The backup is taken
        from the pending file when a transaction is running (see
        ``_actualfilename``).
        '''
        filename = self._actualfilename(tr)
        # backing a file up onto itself would lose the data
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # remove any stale backup before creating the new one
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1734
1751
1735 def restorebackup(self, tr, backupname):
1752 def restorebackup(self, tr, backupname):
1736 '''Restore dirstate by backup file'''
1753 '''Restore dirstate by backup file'''
1737 # this "invalidate()" prevents "wlock.release()" from writing
1754 # this "invalidate()" prevents "wlock.release()" from writing
1738 # changes of dirstate out after restoring from backup file
1755 # changes of dirstate out after restoring from backup file
1739 self.invalidate()
1756 self.invalidate()
1740 filename = self._actualfilename(tr)
1757 filename = self._actualfilename(tr)
1741 o = self._opener
1758 o = self._opener
1742 if util.samefile(o.join(backupname), o.join(filename)):
1759 if util.samefile(o.join(backupname), o.join(filename)):
1743 o.unlink(backupname)
1760 o.unlink(backupname)
1744 else:
1761 else:
1745 o.rename(backupname, filename, checkambig=True)
1762 o.rename(backupname, filename, checkambig=True)
1746
1763
1747 def clearbackup(self, tr, backupname):
1764 def clearbackup(self, tr, backupname):
1748 '''Clear backup file'''
1765 '''Clear backup file'''
1749 self._opener.unlink(backupname)
1766 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now