##// END OF EJS Templates
dirstate: deprecate the `drop` method...
marmoute -
r48553:aca197f5 default
parent child Browse files
Show More
@@ -1,1766 +1,1773 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve the cached file relative to the repo's .hg opener
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve the cached file relative to the working-directory root
        return obj._join(fname)
63
63
64
64
def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    # create a scratch file so we read the filesystem's notion of "now",
    # which may differ from the system clock (e.g. network filesystems)
    fd, scratch = vfs.mkstemp()
    try:
        return os.fstat(fd)[stat.ST_MTIME]
    finally:
        os.close(fd)
        vfs.unlink(scratch)
73
73
74
74
def requires_parents_change(func):
    """Decorator restricting a dirstate method to `parentchange` contexts.

    Raises ProgrammingError when the wrapped method is invoked while no
    parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator forbidding a dirstate method inside `parentchange` contexts.

    Raises ProgrammingError when the wrapped method is invoked while a
    parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._updatedfiles = set()
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
242 if self._checklink and self._checkexec:
243
243
244 def f(x):
244 def f(x):
245 try:
245 try:
246 st = os.lstat(self._join(x))
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
247 if util.statislink(st):
248 return b'l'
248 return b'l'
249 if util.statisexec(st):
249 if util.statisexec(st):
250 return b'x'
250 return b'x'
251 except OSError:
251 except OSError:
252 pass
252 pass
253 return b''
253 return b''
254
254
255 return f
255 return f
256
256
257 fallback = buildfallback()
257 fallback = buildfallback()
258 if self._checklink:
258 if self._checklink:
259
259
260 def f(x):
260 def f(x):
261 if os.path.islink(self._join(x)):
261 if os.path.islink(self._join(x)):
262 return b'l'
262 return b'l'
263 if b'x' in fallback(x):
263 if b'x' in fallback(x):
264 return b'x'
264 return b'x'
265 return b''
265 return b''
266
266
267 return f
267 return f
268 if self._checkexec:
268 if self._checkexec:
269
269
270 def f(x):
270 def f(x):
271 if b'l' in fallback(x):
271 if b'l' in fallback(x):
272 return b'l'
272 return b'l'
273 if util.isexec(self._join(x)):
273 if util.isexec(self._join(x)):
274 return b'x'
274 return b'x'
275 return b''
275 return b''
276
276
277 return f
277 return f
278 else:
278 else:
279 return fallback
279 return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries a
370 When moving from two parents to one, "merged" entries a
371 adjusted to normal and previous copy records discarded and
371 adjusted to normal and previous copy records discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 self._map.setparents(p1, p2)
388 self._map.setparents(p1, p2)
389 copies = {}
389 copies = {}
390 if (
390 if (
391 oldp2 != self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
395
396 for f in candidatefiles:
396 for f in candidatefiles:
397 s = self._map.get(f)
397 s = self._map.get(f)
398 if s is None:
398 if s is None:
399 continue
399 continue
400
400
401 # Discard "merged" markers when moving away from a merge state
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
402 if s.merged:
403 source = self._map.copymap.get(f)
403 source = self._map.copymap.get(f)
404 if source:
404 if source:
405 copies[f] = source
405 copies[f] = source
406 self._normallookup(f)
406 self._normallookup(f)
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._add(f)
413 return copies
413 return copies
414
414
415 def setbranch(self, branch):
415 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 try:
418 try:
419 f.write(self._branch + b'\n')
419 f.write(self._branch + b'\n')
420 f.close()
420 f.close()
421
421
422 # make sure filecache has the correct stat info for _branch after
422 # make sure filecache has the correct stat info for _branch after
423 # replacing the underlying file
423 # replacing the underlying file
424 ce = self._filecache[b'_branch']
424 ce = self._filecache[b'_branch']
425 if ce:
425 if ce:
426 ce.refresh()
426 ce.refresh()
427 except: # re-raises
427 except: # re-raises
428 f.discard()
428 f.discard()
429 raise
429 raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of "update/merge" case. For
469 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True the file was previously untracked, False otherwise.
472 return True the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self._normallookup(filename)
479 self._normallookup(filename)
480 return True
480 return True
481 # XXX This is probably overkill for more case, but we need this to
481 # XXX This is probably overkill for more case, but we need this to
482 # fully replace the `normallookup` call with `set_tracked` one.
482 # fully replace the `normallookup` call with `set_tracked` one.
483 # Consider smoothing this in the future.
483 # Consider smoothing this in the future.
484 self.set_possibly_dirty(filename)
484 self.set_possibly_dirty(filename)
485 return False
485 return False
486
486
487 @requires_no_parents_change
487 @requires_no_parents_change
488 def set_untracked(self, filename):
488 def set_untracked(self, filename):
489 """a "public" method for generic code to mark a file as untracked
489 """a "public" method for generic code to mark a file as untracked
490
490
491 This function is to be called outside of "update/merge" case. For
491 This function is to be called outside of "update/merge" case. For
492 example by a command like `hg remove X`.
492 example by a command like `hg remove X`.
493
493
494 return True the file was previously tracked, False otherwise.
494 return True the file was previously tracked, False otherwise.
495 """
495 """
496 entry = self._map.get(filename)
496 entry = self._map.get(filename)
497 if entry is None:
497 if entry is None:
498 return False
498 return False
499 elif entry.added:
499 elif entry.added:
500 self._drop(filename)
500 self._drop(filename)
501 return True
501 return True
502 else:
502 else:
503 self._remove(filename)
503 self._remove(filename)
504 return True
504 return True
505
505
506 @requires_no_parents_change
506 @requires_no_parents_change
507 def set_clean(self, filename, parentfiledata=None):
507 def set_clean(self, filename, parentfiledata=None):
508 """record that the current state of the file on disk is known to be clean"""
508 """record that the current state of the file on disk is known to be clean"""
509 self._dirty = True
509 self._dirty = True
510 self._updatedfiles.add(filename)
510 self._updatedfiles.add(filename)
511 self._normal(filename, parentfiledata=parentfiledata)
511 self._normal(filename, parentfiledata=parentfiledata)
512
512
513 @requires_no_parents_change
513 @requires_no_parents_change
514 def set_possibly_dirty(self, filename):
514 def set_possibly_dirty(self, filename):
515 """record that the current state of the file on disk is unknown"""
515 """record that the current state of the file on disk is unknown"""
516 self._dirty = True
516 self._dirty = True
517 self._updatedfiles.add(filename)
517 self._updatedfiles.add(filename)
518 self._map.set_possibly_dirty(filename)
518 self._map.set_possibly_dirty(filename)
519
519
520 @requires_parents_change
520 @requires_parents_change
521 def update_file_p1(
521 def update_file_p1(
522 self,
522 self,
523 filename,
523 filename,
524 p1_tracked,
524 p1_tracked,
525 ):
525 ):
526 """Set a file as tracked in the parent (or not)
526 """Set a file as tracked in the parent (or not)
527
527
528 This is to be called when adjust the dirstate to a new parent after an history
528 This is to be called when adjust the dirstate to a new parent after an history
529 rewriting operation.
529 rewriting operation.
530
530
531 It should not be called during a merge (p2 != nullid) and only within
531 It should not be called during a merge (p2 != nullid) and only within
532 a `with dirstate.parentchange():` context.
532 a `with dirstate.parentchange():` context.
533 """
533 """
534 if self.in_merge:
534 if self.in_merge:
535 msg = b'update_file_reference should not be called when merging'
535 msg = b'update_file_reference should not be called when merging'
536 raise error.ProgrammingError(msg)
536 raise error.ProgrammingError(msg)
537 entry = self._map.get(filename)
537 entry = self._map.get(filename)
538 if entry is None:
538 if entry is None:
539 wc_tracked = False
539 wc_tracked = False
540 else:
540 else:
541 wc_tracked = entry.tracked
541 wc_tracked = entry.tracked
542 possibly_dirty = False
542 possibly_dirty = False
543 if p1_tracked and wc_tracked:
543 if p1_tracked and wc_tracked:
544 # the underlying reference might have changed, we will have to
544 # the underlying reference might have changed, we will have to
545 # check it.
545 # check it.
546 possibly_dirty = True
546 possibly_dirty = True
547 elif not (p1_tracked or wc_tracked):
547 elif not (p1_tracked or wc_tracked):
548 # the file is no longer relevant to anyone
548 # the file is no longer relevant to anyone
549 self._drop(filename)
549 self._drop(filename)
550 elif (not p1_tracked) and wc_tracked:
550 elif (not p1_tracked) and wc_tracked:
551 if entry is not None and entry.added:
551 if entry is not None and entry.added:
552 return # avoid dropping copy information (maybe?)
552 return # avoid dropping copy information (maybe?)
553 elif p1_tracked and not wc_tracked:
553 elif p1_tracked and not wc_tracked:
554 pass
554 pass
555 else:
555 else:
556 assert False, 'unreachable'
556 assert False, 'unreachable'
557
557
558 # this mean we are doing call for file we do not really care about the
558 # this mean we are doing call for file we do not really care about the
559 # data (eg: added or removed), however this should be a minor overhead
559 # data (eg: added or removed), however this should be a minor overhead
560 # compared to the overall update process calling this.
560 # compared to the overall update process calling this.
561 parentfiledata = None
561 parentfiledata = None
562 if wc_tracked:
562 if wc_tracked:
563 parentfiledata = self._get_filedata(filename)
563 parentfiledata = self._get_filedata(filename)
564
564
565 self._updatedfiles.add(filename)
565 self._updatedfiles.add(filename)
566 self._map.reset_state(
566 self._map.reset_state(
567 filename,
567 filename,
568 wc_tracked,
568 wc_tracked,
569 p1_tracked,
569 p1_tracked,
570 possibly_dirty=possibly_dirty,
570 possibly_dirty=possibly_dirty,
571 parentfiledata=parentfiledata,
571 parentfiledata=parentfiledata,
572 )
572 )
573 if (
573 if (
574 parentfiledata is not None
574 parentfiledata is not None
575 and parentfiledata[2] > self._lastnormaltime
575 and parentfiledata[2] > self._lastnormaltime
576 ):
576 ):
577 # Remember the most recent modification timeslot for status(),
577 # Remember the most recent modification timeslot for status(),
578 # to make sure we won't miss future size-preserving file content
578 # to make sure we won't miss future size-preserving file content
579 # modifications that happen within the same timeslot.
579 # modifications that happen within the same timeslot.
580 self._lastnormaltime = parentfiledata[2]
580 self._lastnormaltime = parentfiledata[2]
581
581
582 @requires_parents_change
582 @requires_parents_change
583 def update_file(
583 def update_file(
584 self,
584 self,
585 filename,
585 filename,
586 wc_tracked,
586 wc_tracked,
587 p1_tracked,
587 p1_tracked,
588 p2_tracked=False,
588 p2_tracked=False,
589 merged=False,
589 merged=False,
590 clean_p1=False,
590 clean_p1=False,
591 clean_p2=False,
591 clean_p2=False,
592 possibly_dirty=False,
592 possibly_dirty=False,
593 parentfiledata=None,
593 parentfiledata=None,
594 ):
594 ):
595 """update the information about a file in the dirstate
595 """update the information about a file in the dirstate
596
596
597 This is to be called when the direstates parent changes to keep track
597 This is to be called when the direstates parent changes to keep track
598 of what is the file situation in regards to the working copy and its parent.
598 of what is the file situation in regards to the working copy and its parent.
599
599
600 This function must be called within a `dirstate.parentchange` context.
600 This function must be called within a `dirstate.parentchange` context.
601
601
602 note: the API is at an early stage and we might need to ajust it
602 note: the API is at an early stage and we might need to ajust it
603 depending of what information ends up being relevant and useful to
603 depending of what information ends up being relevant and useful to
604 other processing.
604 other processing.
605 """
605 """
606 if merged and (clean_p1 or clean_p2):
606 if merged and (clean_p1 or clean_p2):
607 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
607 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
608 raise error.ProgrammingError(msg)
608 raise error.ProgrammingError(msg)
609
609
610 # note: I do not think we need to double check name clash here since we
610 # note: I do not think we need to double check name clash here since we
611 # are in a update/merge case that should already have taken care of
611 # are in a update/merge case that should already have taken care of
612 # this. The test agrees
612 # this. The test agrees
613
613
614 self._dirty = True
614 self._dirty = True
615 self._updatedfiles.add(filename)
615 self._updatedfiles.add(filename)
616
616
617 need_parent_file_data = (
617 need_parent_file_data = (
618 not (possibly_dirty or clean_p2 or merged)
618 not (possibly_dirty or clean_p2 or merged)
619 and wc_tracked
619 and wc_tracked
620 and p1_tracked
620 and p1_tracked
621 )
621 )
622
622
623 # this mean we are doing call for file we do not really care about the
623 # this mean we are doing call for file we do not really care about the
624 # data (eg: added or removed), however this should be a minor overhead
624 # data (eg: added or removed), however this should be a minor overhead
625 # compared to the overall update process calling this.
625 # compared to the overall update process calling this.
626 if need_parent_file_data:
626 if need_parent_file_data:
627 if parentfiledata is None:
627 if parentfiledata is None:
628 parentfiledata = self._get_filedata(filename)
628 parentfiledata = self._get_filedata(filename)
629 mtime = parentfiledata[2]
629 mtime = parentfiledata[2]
630
630
631 if mtime > self._lastnormaltime:
631 if mtime > self._lastnormaltime:
632 # Remember the most recent modification timeslot for
632 # Remember the most recent modification timeslot for
633 # status(), to make sure we won't miss future
633 # status(), to make sure we won't miss future
634 # size-preserving file content modifications that happen
634 # size-preserving file content modifications that happen
635 # within the same timeslot.
635 # within the same timeslot.
636 self._lastnormaltime = mtime
636 self._lastnormaltime = mtime
637
637
638 self._map.reset_state(
638 self._map.reset_state(
639 filename,
639 filename,
640 wc_tracked,
640 wc_tracked,
641 p1_tracked,
641 p1_tracked,
642 p2_tracked=p2_tracked,
642 p2_tracked=p2_tracked,
643 merged=merged,
643 merged=merged,
644 clean_p1=clean_p1,
644 clean_p1=clean_p1,
645 clean_p2=clean_p2,
645 clean_p2=clean_p2,
646 possibly_dirty=possibly_dirty,
646 possibly_dirty=possibly_dirty,
647 parentfiledata=parentfiledata,
647 parentfiledata=parentfiledata,
648 )
648 )
649 if (
649 if (
650 parentfiledata is not None
650 parentfiledata is not None
651 and parentfiledata[2] > self._lastnormaltime
651 and parentfiledata[2] > self._lastnormaltime
652 ):
652 ):
653 # Remember the most recent modification timeslot for status(),
653 # Remember the most recent modification timeslot for status(),
654 # to make sure we won't miss future size-preserving file content
654 # to make sure we won't miss future size-preserving file content
655 # modifications that happen within the same timeslot.
655 # modifications that happen within the same timeslot.
656 self._lastnormaltime = parentfiledata[2]
656 self._lastnormaltime = parentfiledata[2]
657
657
658 def _addpath(
658 def _addpath(
659 self,
659 self,
660 f,
660 f,
661 mode=0,
661 mode=0,
662 size=None,
662 size=None,
663 mtime=None,
663 mtime=None,
664 added=False,
664 added=False,
665 merged=False,
665 merged=False,
666 from_p2=False,
666 from_p2=False,
667 possibly_dirty=False,
667 possibly_dirty=False,
668 ):
668 ):
669 entry = self._map.get(f)
669 entry = self._map.get(f)
670 if added or entry is not None and entry.removed:
670 if added or entry is not None and entry.removed:
671 scmutil.checkfilename(f)
671 scmutil.checkfilename(f)
672 if self._map.hastrackeddir(f):
672 if self._map.hastrackeddir(f):
673 msg = _(b'directory %r already in dirstate')
673 msg = _(b'directory %r already in dirstate')
674 msg %= pycompat.bytestr(f)
674 msg %= pycompat.bytestr(f)
675 raise error.Abort(msg)
675 raise error.Abort(msg)
676 # shadows
676 # shadows
677 for d in pathutil.finddirs(f):
677 for d in pathutil.finddirs(f):
678 if self._map.hastrackeddir(d):
678 if self._map.hastrackeddir(d):
679 break
679 break
680 entry = self._map.get(d)
680 entry = self._map.get(d)
681 if entry is not None and not entry.removed:
681 if entry is not None and not entry.removed:
682 msg = _(b'file %r in dirstate clashes with %r')
682 msg = _(b'file %r in dirstate clashes with %r')
683 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
683 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
684 raise error.Abort(msg)
684 raise error.Abort(msg)
685 self._dirty = True
685 self._dirty = True
686 self._updatedfiles.add(f)
686 self._updatedfiles.add(f)
687 self._map.addfile(
687 self._map.addfile(
688 f,
688 f,
689 mode=mode,
689 mode=mode,
690 size=size,
690 size=size,
691 mtime=mtime,
691 mtime=mtime,
692 added=added,
692 added=added,
693 merged=merged,
693 merged=merged,
694 from_p2=from_p2,
694 from_p2=from_p2,
695 possibly_dirty=possibly_dirty,
695 possibly_dirty=possibly_dirty,
696 )
696 )
697
697
698 def _get_filedata(self, filename):
698 def _get_filedata(self, filename):
699 """returns"""
699 """returns"""
700 s = os.lstat(self._join(filename))
700 s = os.lstat(self._join(filename))
701 mode = s.st_mode
701 mode = s.st_mode
702 size = s.st_size
702 size = s.st_size
703 mtime = s[stat.ST_MTIME]
703 mtime = s[stat.ST_MTIME]
704 return (mode, size, mtime)
704 return (mode, size, mtime)
705
705
706 def normal(self, f, parentfiledata=None):
706 def normal(self, f, parentfiledata=None):
707 """Mark a file normal and clean.
707 """Mark a file normal and clean.
708
708
709 parentfiledata: (mode, size, mtime) of the clean file
709 parentfiledata: (mode, size, mtime) of the clean file
710
710
711 parentfiledata should be computed from memory (for mode,
711 parentfiledata should be computed from memory (for mode,
712 size), as or close as possible from the point where we
712 size), as or close as possible from the point where we
713 determined the file was clean, to limit the risk of the
713 determined the file was clean, to limit the risk of the
714 file having been changed by an external process between the
714 file having been changed by an external process between the
715 moment where the file was determined to be clean and now."""
715 moment where the file was determined to be clean and now."""
716 if self.pendingparentchange():
716 if self.pendingparentchange():
717 util.nouideprecwarn(
717 util.nouideprecwarn(
718 b"do not use `normal` inside of update/merge context."
718 b"do not use `normal` inside of update/merge context."
719 b" Use `update_file` or `update_file_p1`",
719 b" Use `update_file` or `update_file_p1`",
720 b'6.0',
720 b'6.0',
721 stacklevel=2,
721 stacklevel=2,
722 )
722 )
723 else:
723 else:
724 util.nouideprecwarn(
724 util.nouideprecwarn(
725 b"do not use `normal` outside of update/merge context."
725 b"do not use `normal` outside of update/merge context."
726 b" Use `set_tracked`",
726 b" Use `set_tracked`",
727 b'6.0',
727 b'6.0',
728 stacklevel=2,
728 stacklevel=2,
729 )
729 )
730 self._normal(f, parentfiledata=parentfiledata)
730 self._normal(f, parentfiledata=parentfiledata)
731
731
732 def _normal(self, f, parentfiledata=None):
732 def _normal(self, f, parentfiledata=None):
733 if parentfiledata:
733 if parentfiledata:
734 (mode, size, mtime) = parentfiledata
734 (mode, size, mtime) = parentfiledata
735 else:
735 else:
736 (mode, size, mtime) = self._get_filedata(f)
736 (mode, size, mtime) = self._get_filedata(f)
737 self._addpath(f, mode=mode, size=size, mtime=mtime)
737 self._addpath(f, mode=mode, size=size, mtime=mtime)
738 self._map.copymap.pop(f, None)
738 self._map.copymap.pop(f, None)
739 if f in self._map.nonnormalset:
739 if f in self._map.nonnormalset:
740 self._map.nonnormalset.remove(f)
740 self._map.nonnormalset.remove(f)
741 if mtime > self._lastnormaltime:
741 if mtime > self._lastnormaltime:
742 # Remember the most recent modification timeslot for status(),
742 # Remember the most recent modification timeslot for status(),
743 # to make sure we won't miss future size-preserving file content
743 # to make sure we won't miss future size-preserving file content
744 # modifications that happen within the same timeslot.
744 # modifications that happen within the same timeslot.
745 self._lastnormaltime = mtime
745 self._lastnormaltime = mtime
746
746
747 def normallookup(self, f):
747 def normallookup(self, f):
748 '''Mark a file normal, but possibly dirty.'''
748 '''Mark a file normal, but possibly dirty.'''
749 if self.pendingparentchange():
749 if self.pendingparentchange():
750 util.nouideprecwarn(
750 util.nouideprecwarn(
751 b"do not use `normallookup` inside of update/merge context."
751 b"do not use `normallookup` inside of update/merge context."
752 b" Use `update_file` or `update_file_p1`",
752 b" Use `update_file` or `update_file_p1`",
753 b'6.0',
753 b'6.0',
754 stacklevel=2,
754 stacklevel=2,
755 )
755 )
756 else:
756 else:
757 util.nouideprecwarn(
757 util.nouideprecwarn(
758 b"do not use `normallookup` outside of update/merge context."
758 b"do not use `normallookup` outside of update/merge context."
759 b" Use `set_possibly_dirty` or `set_tracked`",
759 b" Use `set_possibly_dirty` or `set_tracked`",
760 b'6.0',
760 b'6.0',
761 stacklevel=2,
761 stacklevel=2,
762 )
762 )
763 self._normallookup(f)
763 self._normallookup(f)
764
764
765 def _normallookup(self, f):
765 def _normallookup(self, f):
766 '''Mark a file normal, but possibly dirty.'''
766 '''Mark a file normal, but possibly dirty.'''
767 if self.in_merge:
767 if self.in_merge:
768 # if there is a merge going on and the file was either
768 # if there is a merge going on and the file was either
769 # "merged" or coming from other parent (-2) before
769 # "merged" or coming from other parent (-2) before
770 # being removed, restore that state.
770 # being removed, restore that state.
771 entry = self._map.get(f)
771 entry = self._map.get(f)
772 if entry is not None:
772 if entry is not None:
773 # XXX this should probably be dealt with a a lower level
773 # XXX this should probably be dealt with a a lower level
774 # (see `merged_removed` and `from_p2_removed`)
774 # (see `merged_removed` and `from_p2_removed`)
775 if entry.merged_removed or entry.from_p2_removed:
775 if entry.merged_removed or entry.from_p2_removed:
776 source = self._map.copymap.get(f)
776 source = self._map.copymap.get(f)
777 if entry.merged_removed:
777 if entry.merged_removed:
778 self._merge(f)
778 self._merge(f)
779 elif entry.from_p2_removed:
779 elif entry.from_p2_removed:
780 self._otherparent(f)
780 self._otherparent(f)
781 if source is not None:
781 if source is not None:
782 self.copy(source, f)
782 self.copy(source, f)
783 return
783 return
784 elif entry.merged or entry.from_p2:
784 elif entry.merged or entry.from_p2:
785 return
785 return
786 self._addpath(f, possibly_dirty=True)
786 self._addpath(f, possibly_dirty=True)
787 self._map.copymap.pop(f, None)
787 self._map.copymap.pop(f, None)
788
788
789 def otherparent(self, f):
789 def otherparent(self, f):
790 '''Mark as coming from the other parent, always dirty.'''
790 '''Mark as coming from the other parent, always dirty.'''
791 if self.pendingparentchange():
791 if self.pendingparentchange():
792 util.nouideprecwarn(
792 util.nouideprecwarn(
793 b"do not use `otherparent` inside of update/merge context."
793 b"do not use `otherparent` inside of update/merge context."
794 b" Use `update_file` or `update_file_p1`",
794 b" Use `update_file` or `update_file_p1`",
795 b'6.0',
795 b'6.0',
796 stacklevel=2,
796 stacklevel=2,
797 )
797 )
798 else:
798 else:
799 util.nouideprecwarn(
799 util.nouideprecwarn(
800 b"do not use `otherparent` outside of update/merge context."
800 b"do not use `otherparent` outside of update/merge context."
801 b"It should have been set by the update/merge code",
801 b"It should have been set by the update/merge code",
802 b'6.0',
802 b'6.0',
803 stacklevel=2,
803 stacklevel=2,
804 )
804 )
805 self._otherparent(f)
805 self._otherparent(f)
806
806
807 def _otherparent(self, f):
807 def _otherparent(self, f):
808 if not self.in_merge:
808 if not self.in_merge:
809 msg = _(b"setting %r to other parent only allowed in merges") % f
809 msg = _(b"setting %r to other parent only allowed in merges") % f
810 raise error.Abort(msg)
810 raise error.Abort(msg)
811 entry = self._map.get(f)
811 entry = self._map.get(f)
812 if entry is not None and entry.tracked:
812 if entry is not None and entry.tracked:
813 # merge-like
813 # merge-like
814 self._addpath(f, merged=True)
814 self._addpath(f, merged=True)
815 else:
815 else:
816 # add-like
816 # add-like
817 self._addpath(f, from_p2=True)
817 self._addpath(f, from_p2=True)
818 self._map.copymap.pop(f, None)
818 self._map.copymap.pop(f, None)
819
819
820 def add(self, f):
820 def add(self, f):
821 '''Mark a file added.'''
821 '''Mark a file added.'''
822 if not self.pendingparentchange():
822 if not self.pendingparentchange():
823 util.nouideprecwarn(
823 util.nouideprecwarn(
824 b"do not use `add` outside of update/merge context."
824 b"do not use `add` outside of update/merge context."
825 b" Use `set_tracked`",
825 b" Use `set_tracked`",
826 b'6.0',
826 b'6.0',
827 stacklevel=2,
827 stacklevel=2,
828 )
828 )
829 self._add(f)
829 self._add(f)
830
830
831 def _add(self, filename):
831 def _add(self, filename):
832 """internal function to mark a file as added"""
832 """internal function to mark a file as added"""
833 self._addpath(filename, added=True)
833 self._addpath(filename, added=True)
834 self._map.copymap.pop(filename, None)
834 self._map.copymap.pop(filename, None)
835
835
836 def remove(self, f):
836 def remove(self, f):
837 '''Mark a file removed'''
837 '''Mark a file removed'''
838 if self.pendingparentchange():
838 if self.pendingparentchange():
839 util.nouideprecwarn(
839 util.nouideprecwarn(
840 b"do not use `remove` insde of update/merge context."
840 b"do not use `remove` insde of update/merge context."
841 b" Use `update_file` or `update_file_p1`",
841 b" Use `update_file` or `update_file_p1`",
842 b'6.0',
842 b'6.0',
843 stacklevel=2,
843 stacklevel=2,
844 )
844 )
845 else:
845 else:
846 util.nouideprecwarn(
846 util.nouideprecwarn(
847 b"do not use `remove` outside of update/merge context."
847 b"do not use `remove` outside of update/merge context."
848 b" Use `set_untracked`",
848 b" Use `set_untracked`",
849 b'6.0',
849 b'6.0',
850 stacklevel=2,
850 stacklevel=2,
851 )
851 )
852 self._remove(f)
852 self._remove(f)
853
853
854 def _remove(self, filename):
854 def _remove(self, filename):
855 """internal function to mark a file removed"""
855 """internal function to mark a file removed"""
856 self._dirty = True
856 self._dirty = True
857 self._updatedfiles.add(filename)
857 self._updatedfiles.add(filename)
858 self._map.removefile(filename, in_merge=self.in_merge)
858 self._map.removefile(filename, in_merge=self.in_merge)
859
859
860 def merge(self, f):
860 def merge(self, f):
861 '''Mark a file merged.'''
861 '''Mark a file merged.'''
862 if self.pendingparentchange():
862 if self.pendingparentchange():
863 util.nouideprecwarn(
863 util.nouideprecwarn(
864 b"do not use `merge` inside of update/merge context."
864 b"do not use `merge` inside of update/merge context."
865 b" Use `update_file`",
865 b" Use `update_file`",
866 b'6.0',
866 b'6.0',
867 stacklevel=2,
867 stacklevel=2,
868 )
868 )
869 else:
869 else:
870 util.nouideprecwarn(
870 util.nouideprecwarn(
871 b"do not use `merge` outside of update/merge context."
871 b"do not use `merge` outside of update/merge context."
872 b"It should have been set by the update/merge code",
872 b"It should have been set by the update/merge code",
873 b'6.0',
873 b'6.0',
874 stacklevel=2,
874 stacklevel=2,
875 )
875 )
876 self._merge(f)
876 self._merge(f)
877
877
878 def _merge(self, f):
878 def _merge(self, f):
879 if not self.in_merge:
879 if not self.in_merge:
880 return self._normallookup(f)
880 return self._normallookup(f)
881 return self._otherparent(f)
881 return self._otherparent(f)
882
882
883 def drop(self, f):
883 def drop(self, f):
884 '''Drop a file from the dirstate'''
884 '''Drop a file from the dirstate'''
885 if not self.pendingparentchange():
885 if self.pendingparentchange():
886 util.nouideprecwarn(
887 b"do not use `drop` inside of update/merge context."
888 b" Use `update_file`",
889 b'6.0',
890 stacklevel=2,
891 )
892 else:
886 util.nouideprecwarn(
893 util.nouideprecwarn(
887 b"do not use `drop` outside of update/merge context."
894 b"do not use `drop` outside of update/merge context."
888 b" Use `set_untracked`",
895 b" Use `set_untracked`",
889 b'6.0',
896 b'6.0',
890 stacklevel=2,
897 stacklevel=2,
891 )
898 )
892 self._drop(f)
899 self._drop(f)
893
900
894 def _drop(self, filename):
901 def _drop(self, filename):
895 """internal function to drop a file from the dirstate"""
902 """internal function to drop a file from the dirstate"""
896 if self._map.dropfile(filename):
903 if self._map.dropfile(filename):
897 self._dirty = True
904 self._dirty = True
898 self._updatedfiles.add(filename)
905 self._updatedfiles.add(filename)
899 self._map.copymap.pop(filename, None)
906 self._map.copymap.pop(filename, None)
900
907
901 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
908 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
902 if exists is None:
909 if exists is None:
903 exists = os.path.lexists(os.path.join(self._root, path))
910 exists = os.path.lexists(os.path.join(self._root, path))
904 if not exists:
911 if not exists:
905 # Maybe a path component exists
912 # Maybe a path component exists
906 if not ignoremissing and b'/' in path:
913 if not ignoremissing and b'/' in path:
907 d, f = path.rsplit(b'/', 1)
914 d, f = path.rsplit(b'/', 1)
908 d = self._normalize(d, False, ignoremissing, None)
915 d = self._normalize(d, False, ignoremissing, None)
909 folded = d + b"/" + f
916 folded = d + b"/" + f
910 else:
917 else:
911 # No path components, preserve original case
918 # No path components, preserve original case
912 folded = path
919 folded = path
913 else:
920 else:
914 # recursively normalize leading directory components
921 # recursively normalize leading directory components
915 # against dirstate
922 # against dirstate
916 if b'/' in normed:
923 if b'/' in normed:
917 d, f = normed.rsplit(b'/', 1)
924 d, f = normed.rsplit(b'/', 1)
918 d = self._normalize(d, False, ignoremissing, True)
925 d = self._normalize(d, False, ignoremissing, True)
919 r = self._root + b"/" + d
926 r = self._root + b"/" + d
920 folded = d + b"/" + util.fspath(f, r)
927 folded = d + b"/" + util.fspath(f, r)
921 else:
928 else:
922 folded = util.fspath(normed, self._root)
929 folded = util.fspath(normed, self._root)
923 storemap[normed] = folded
930 storemap[normed] = folded
924
931
925 return folded
932 return folded
926
933
927 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
934 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
928 normed = util.normcase(path)
935 normed = util.normcase(path)
929 folded = self._map.filefoldmap.get(normed, None)
936 folded = self._map.filefoldmap.get(normed, None)
930 if folded is None:
937 if folded is None:
931 if isknown:
938 if isknown:
932 folded = path
939 folded = path
933 else:
940 else:
934 folded = self._discoverpath(
941 folded = self._discoverpath(
935 path, normed, ignoremissing, exists, self._map.filefoldmap
942 path, normed, ignoremissing, exists, self._map.filefoldmap
936 )
943 )
937 return folded
944 return folded
938
945
939 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
946 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
940 normed = util.normcase(path)
947 normed = util.normcase(path)
941 folded = self._map.filefoldmap.get(normed, None)
948 folded = self._map.filefoldmap.get(normed, None)
942 if folded is None:
949 if folded is None:
943 folded = self._map.dirfoldmap.get(normed, None)
950 folded = self._map.dirfoldmap.get(normed, None)
944 if folded is None:
951 if folded is None:
945 if isknown:
952 if isknown:
946 folded = path
953 folded = path
947 else:
954 else:
948 # store discovered result in dirfoldmap so that future
955 # store discovered result in dirfoldmap so that future
949 # normalizefile calls don't start matching directories
956 # normalizefile calls don't start matching directories
950 folded = self._discoverpath(
957 folded = self._discoverpath(
951 path, normed, ignoremissing, exists, self._map.dirfoldmap
958 path, normed, ignoremissing, exists, self._map.dirfoldmap
952 )
959 )
953 return folded
960 return folded
954
961
955 def normalize(self, path, isknown=False, ignoremissing=False):
962 def normalize(self, path, isknown=False, ignoremissing=False):
956 """
963 """
957 normalize the case of a pathname when on a casefolding filesystem
964 normalize the case of a pathname when on a casefolding filesystem
958
965
959 isknown specifies whether the filename came from walking the
966 isknown specifies whether the filename came from walking the
960 disk, to avoid extra filesystem access.
967 disk, to avoid extra filesystem access.
961
968
962 If ignoremissing is True, missing path are returned
969 If ignoremissing is True, missing path are returned
963 unchanged. Otherwise, we try harder to normalize possibly
970 unchanged. Otherwise, we try harder to normalize possibly
964 existing path components.
971 existing path components.
965
972
966 The normalized case is determined based on the following precedence:
973 The normalized case is determined based on the following precedence:
967
974
968 - version of name already stored in the dirstate
975 - version of name already stored in the dirstate
969 - version of name stored on disk
976 - version of name stored on disk
970 - version provided via command arguments
977 - version provided via command arguments
971 """
978 """
972
979
973 if self._checkcase:
980 if self._checkcase:
974 return self._normalize(path, isknown, ignoremissing)
981 return self._normalize(path, isknown, ignoremissing)
975 return path
982 return path
976
983
977 def clear(self):
984 def clear(self):
978 self._map.clear()
985 self._map.clear()
979 self._lastnormaltime = 0
986 self._lastnormaltime = 0
980 self._updatedfiles.clear()
987 self._updatedfiles.clear()
981 self._dirty = True
988 self._dirty = True
982
989
983 def rebuild(self, parent, allfiles, changedfiles=None):
990 def rebuild(self, parent, allfiles, changedfiles=None):
984 if changedfiles is None:
991 if changedfiles is None:
985 # Rebuild entire dirstate
992 # Rebuild entire dirstate
986 to_lookup = allfiles
993 to_lookup = allfiles
987 to_drop = []
994 to_drop = []
988 lastnormaltime = self._lastnormaltime
995 lastnormaltime = self._lastnormaltime
989 self.clear()
996 self.clear()
990 self._lastnormaltime = lastnormaltime
997 self._lastnormaltime = lastnormaltime
991 elif len(changedfiles) < 10:
998 elif len(changedfiles) < 10:
992 # Avoid turning allfiles into a set, which can be expensive if it's
999 # Avoid turning allfiles into a set, which can be expensive if it's
993 # large.
1000 # large.
994 to_lookup = []
1001 to_lookup = []
995 to_drop = []
1002 to_drop = []
996 for f in changedfiles:
1003 for f in changedfiles:
997 if f in allfiles:
1004 if f in allfiles:
998 to_lookup.append(f)
1005 to_lookup.append(f)
999 else:
1006 else:
1000 to_drop.append(f)
1007 to_drop.append(f)
1001 else:
1008 else:
1002 changedfilesset = set(changedfiles)
1009 changedfilesset = set(changedfiles)
1003 to_lookup = changedfilesset & set(allfiles)
1010 to_lookup = changedfilesset & set(allfiles)
1004 to_drop = changedfilesset - to_lookup
1011 to_drop = changedfilesset - to_lookup
1005
1012
1006 if self._origpl is None:
1013 if self._origpl is None:
1007 self._origpl = self._pl
1014 self._origpl = self._pl
1008 self._map.setparents(parent, self._nodeconstants.nullid)
1015 self._map.setparents(parent, self._nodeconstants.nullid)
1009
1016
1010 for f in to_lookup:
1017 for f in to_lookup:
1011 self._normallookup(f)
1018 self._normallookup(f)
1012 for f in to_drop:
1019 for f in to_drop:
1013 self._drop(f)
1020 self._drop(f)
1014
1021
1015 self._dirty = True
1022 self._dirty = True
1016
1023
1017 def identity(self):
1024 def identity(self):
1018 """Return identity of dirstate itself to detect changing in storage
1025 """Return identity of dirstate itself to detect changing in storage
1019
1026
1020 If identity of previous dirstate is equal to this, writing
1027 If identity of previous dirstate is equal to this, writing
1021 changes based on the former dirstate out can keep consistency.
1028 changes based on the former dirstate out can keep consistency.
1022 """
1029 """
1023 return self._map.identity
1030 return self._map.identity
1024
1031
1025 def write(self, tr):
1032 def write(self, tr):
1026 if not self._dirty:
1033 if not self._dirty:
1027 return
1034 return
1028
1035
1029 filename = self._filename
1036 filename = self._filename
1030 if tr:
1037 if tr:
1031 # 'dirstate.write()' is not only for writing in-memory
1038 # 'dirstate.write()' is not only for writing in-memory
1032 # changes out, but also for dropping ambiguous timestamp.
1039 # changes out, but also for dropping ambiguous timestamp.
1033 # delayed writing re-raise "ambiguous timestamp issue".
1040 # delayed writing re-raise "ambiguous timestamp issue".
1034 # See also the wiki page below for detail:
1041 # See also the wiki page below for detail:
1035 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
1042 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
1036
1043
1037 # emulate dropping timestamp in 'parsers.pack_dirstate'
1044 # emulate dropping timestamp in 'parsers.pack_dirstate'
1038 now = _getfsnow(self._opener)
1045 now = _getfsnow(self._opener)
1039 self._map.clearambiguoustimes(self._updatedfiles, now)
1046 self._map.clearambiguoustimes(self._updatedfiles, now)
1040
1047
1041 # emulate that all 'dirstate.normal' results are written out
1048 # emulate that all 'dirstate.normal' results are written out
1042 self._lastnormaltime = 0
1049 self._lastnormaltime = 0
1043 self._updatedfiles.clear()
1050 self._updatedfiles.clear()
1044
1051
1045 # delay writing in-memory changes out
1052 # delay writing in-memory changes out
1046 tr.addfilegenerator(
1053 tr.addfilegenerator(
1047 b'dirstate',
1054 b'dirstate',
1048 (self._filename,),
1055 (self._filename,),
1049 lambda f: self._writedirstate(tr, f),
1056 lambda f: self._writedirstate(tr, f),
1050 location=b'plain',
1057 location=b'plain',
1051 )
1058 )
1052 return
1059 return
1053
1060
1054 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
1061 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
1055 self._writedirstate(tr, st)
1062 self._writedirstate(tr, st)
1056
1063
1057 def addparentchangecallback(self, category, callback):
1064 def addparentchangecallback(self, category, callback):
1058 """add a callback to be called when the wd parents are changed
1065 """add a callback to be called when the wd parents are changed
1059
1066
1060 Callback will be called with the following arguments:
1067 Callback will be called with the following arguments:
1061 dirstate, (oldp1, oldp2), (newp1, newp2)
1068 dirstate, (oldp1, oldp2), (newp1, newp2)
1062
1069
1063 Category is a unique identifier to allow overwriting an old callback
1070 Category is a unique identifier to allow overwriting an old callback
1064 with a newer callback.
1071 with a newer callback.
1065 """
1072 """
1066 self._plchangecallbacks[category] = callback
1073 self._plchangecallbacks[category] = callback
1067
1074
1068 def _writedirstate(self, tr, st):
1075 def _writedirstate(self, tr, st):
1069 # notify callbacks about parents change
1076 # notify callbacks about parents change
1070 if self._origpl is not None and self._origpl != self._pl:
1077 if self._origpl is not None and self._origpl != self._pl:
1071 for c, callback in sorted(
1078 for c, callback in sorted(
1072 pycompat.iteritems(self._plchangecallbacks)
1079 pycompat.iteritems(self._plchangecallbacks)
1073 ):
1080 ):
1074 callback(self, self._origpl, self._pl)
1081 callback(self, self._origpl, self._pl)
1075 self._origpl = None
1082 self._origpl = None
1076 # use the modification time of the newly created temporary file as the
1083 # use the modification time of the newly created temporary file as the
1077 # filesystem's notion of 'now'
1084 # filesystem's notion of 'now'
1078 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
1085 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
1079
1086
1080 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1087 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1081 # timestamp of each entries in dirstate, because of 'now > mtime'
1088 # timestamp of each entries in dirstate, because of 'now > mtime'
1082 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
1089 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
1083 if delaywrite > 0:
1090 if delaywrite > 0:
1084 # do we have any files to delay for?
1091 # do we have any files to delay for?
1085 for f, e in pycompat.iteritems(self._map):
1092 for f, e in pycompat.iteritems(self._map):
1086 if e.need_delay(now):
1093 if e.need_delay(now):
1087 import time # to avoid useless import
1094 import time # to avoid useless import
1088
1095
1089 # rather than sleep n seconds, sleep until the next
1096 # rather than sleep n seconds, sleep until the next
1090 # multiple of n seconds
1097 # multiple of n seconds
1091 clock = time.time()
1098 clock = time.time()
1092 start = int(clock) - (int(clock) % delaywrite)
1099 start = int(clock) - (int(clock) % delaywrite)
1093 end = start + delaywrite
1100 end = start + delaywrite
1094 time.sleep(end - clock)
1101 time.sleep(end - clock)
1095 now = end # trust our estimate that the end is near now
1102 now = end # trust our estimate that the end is near now
1096 break
1103 break
1097
1104
1098 self._map.write(tr, st, now)
1105 self._map.write(tr, st, now)
1099 self._lastnormaltime = 0
1106 self._lastnormaltime = 0
1100 self._dirty = False
1107 self._dirty = False
1101
1108
1102 def _dirignore(self, f):
1109 def _dirignore(self, f):
1103 if self._ignore(f):
1110 if self._ignore(f):
1104 return True
1111 return True
1105 for p in pathutil.finddirs(f):
1112 for p in pathutil.finddirs(f):
1106 if self._ignore(p):
1113 if self._ignore(p):
1107 return True
1114 return True
1108 return False
1115 return False
1109
1116
1110 def _ignorefiles(self):
1117 def _ignorefiles(self):
1111 files = []
1118 files = []
1112 if os.path.exists(self._join(b'.hgignore')):
1119 if os.path.exists(self._join(b'.hgignore')):
1113 files.append(self._join(b'.hgignore'))
1120 files.append(self._join(b'.hgignore'))
1114 for name, path in self._ui.configitems(b"ui"):
1121 for name, path in self._ui.configitems(b"ui"):
1115 if name == b'ignore' or name.startswith(b'ignore.'):
1122 if name == b'ignore' or name.startswith(b'ignore.'):
1116 # we need to use os.path.join here rather than self._join
1123 # we need to use os.path.join here rather than self._join
1117 # because path is arbitrary and user-specified
1124 # because path is arbitrary and user-specified
1118 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1125 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1119 return files
1126 return files
1120
1127
1121 def _ignorefileandline(self, f):
1128 def _ignorefileandline(self, f):
1122 files = collections.deque(self._ignorefiles())
1129 files = collections.deque(self._ignorefiles())
1123 visited = set()
1130 visited = set()
1124 while files:
1131 while files:
1125 i = files.popleft()
1132 i = files.popleft()
1126 patterns = matchmod.readpatternfile(
1133 patterns = matchmod.readpatternfile(
1127 i, self._ui.warn, sourceinfo=True
1134 i, self._ui.warn, sourceinfo=True
1128 )
1135 )
1129 for pattern, lineno, line in patterns:
1136 for pattern, lineno, line in patterns:
1130 kind, p = matchmod._patsplit(pattern, b'glob')
1137 kind, p = matchmod._patsplit(pattern, b'glob')
1131 if kind == b"subinclude":
1138 if kind == b"subinclude":
1132 if p not in visited:
1139 if p not in visited:
1133 files.append(p)
1140 files.append(p)
1134 continue
1141 continue
1135 m = matchmod.match(
1142 m = matchmod.match(
1136 self._root, b'', [], [pattern], warn=self._ui.warn
1143 self._root, b'', [], [pattern], warn=self._ui.warn
1137 )
1144 )
1138 if m(f):
1145 if m(f):
1139 return (i, lineno, line)
1146 return (i, lineno, line)
1140 visited.add(i)
1147 visited.add(i)
1141 return (None, -1, b"")
1148 return (None, -1, b"")
1142
1149
def _walkexplicit(self, match, subrepos):
    """Get stat data about the files explicitly specified by match.

    Return a triple (results, dirsfound, dirsnotfound).
    - results is a mapping from filename to stat result. It also contains
      listings mapping subrepos and .hg to None.
    - dirsfound is a list of files found to be directories.
    - dirsnotfound is a list of files that the dirstate thinks are
      directories and that were not found."""

    def badtype(mode):
        # Human-readable description for a file whose type we cannot track.
        kind = _(b'unknown')
        if stat.S_ISCHR(mode):
            kind = _(b'character device')
        elif stat.S_ISBLK(mode):
            kind = _(b'block device')
        elif stat.S_ISFIFO(mode):
            kind = _(b'fifo')
        elif stat.S_ISSOCK(mode):
            kind = _(b'socket')
        elif stat.S_ISDIR(mode):
            kind = _(b'directory')
        return _(b'unsupported file type (type is %s)') % kind

    # local aliases to avoid repeated attribute lookups in the loop below
    badfn = match.bad
    dmap = self._map
    lstat = os.lstat
    getkind = stat.S_IFMT
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self._join
    dirsfound = []
    foundadd = dirsfound.append
    dirsnotfound = []
    notfoundadd = dirsnotfound.append

    # case normalization is only needed for non-exact matches on
    # case-insensitive filesystems
    if not match.isexact() and self._checkcase:
        normalize = self._normalize
    else:
        normalize = None

    # drop explicit files that live inside a subrepo: both lists are
    # sorted, so this is a single merge-style pass
    files = sorted(match.files())
    subrepos.sort()
    i, j = 0, 0
    while i < len(files) and j < len(subrepos):
        subpath = subrepos[j] + b"/"
        if files[i] < subpath:
            i += 1
            continue
        while i < len(files) and files[i].startswith(subpath):
            del files[i]
        j += 1

    if not files or b'' in files:
        files = [b'']
        # constructing the foldmap is expensive, so don't do it for the
        # common case where files is ['']
        normalize = None
    # subrepos and .hg are mapped to None as sentinels (present but not
    # stat'ed); further code treats "in results" as "already handled"
    results = dict.fromkeys(subrepos)
    results[b'.hg'] = None

    for ff in files:
        if normalize:
            nf = normalize(ff, False, True)
        else:
            nf = ff
        if nf in results:
            continue

        try:
            st = lstat(join(nf))
            kind = getkind(st.st_mode)
            if kind == dirkind:
                if nf in dmap:
                    # file replaced by dir on disk but still in dirstate
                    results[nf] = None
                foundadd((nf, ff))
            elif kind == regkind or kind == lnkkind:
                results[nf] = st
            else:
                badfn(ff, badtype(kind))
                if nf in dmap:
                    results[nf] = None
        except OSError as inst:  # nf not found on disk - it is dirstate only
            if nf in dmap:  # does it exactly match a missing file?
                results[nf] = None
            else:  # does it match a missing directory?
                if self._map.hasdir(nf):
                    notfoundadd(nf)
                else:
                    badfn(ff, encoding.strtolocal(inst.strerror))

    # match.files() may contain explicitly-specified paths that shouldn't
    # be taken; drop them from the list of files found. dirsfound/notfound
    # aren't filtered here because they will be tested later.
    if match.anypats():
        for f in list(results):
            if f == b'.hg' or f in subrepos:
                # keep sentinel to disable further out-of-repo walks
                continue
            if not match(f):
                del results[f]

    # Case insensitive filesystems cannot rely on lstat() failing to detect
    # a case-only rename. Prune the stat object for any file that does not
    # match the case in the filesystem, if there are multiple files that
    # normalize to the same path.
    if match.isexact() and self._checkcase:
        normed = {}

        # group stat'ed paths by their case-normalized form
        for f, st in pycompat.iteritems(results):
            if st is None:
                continue

            nc = util.normcase(f)
            paths = normed.get(nc)

            if paths is None:
                paths = set()
                normed[nc] = paths

            paths.add(f)

        # when several requested paths fold to the same name, keep the
        # stat only for the one whose case matches the filesystem
        for norm, paths in pycompat.iteritems(normed):
            if len(paths) > 1:
                for path in paths:
                    folded = self._discoverpath(
                        path, norm, True, None, self._map.dirfoldmap
                    )
                    if path != folded:
                        results[path] = None

    return results, dirsfound, dirsnotfound
1277
1284
def walk(self, match, subrepos, unknown, ignored, full=True):
    """
    Walk recursively through the directory tree, finding all files
    matched by match.

    If full is False, maybe skip some known-clean files.

    match: matcher selecting the files of interest
    subrepos: list of subrepo paths; they are reported as None entries
    unknown / ignored: whether unknown resp. ignored files are wanted

    Return a dict mapping filename to stat-like object (either
    mercurial.osutil.stat instance or return value of os.stat()).

    """
    # full is a flag that extensions that hook into walk can use -- this
    # implementation doesn't use it at all. This satisfies the contract
    # because we only guarantee a "maybe".

    # select the ignore predicates according to what the caller wants
    # reported; util.always / util.never short-circuit whole subtrees
    if ignored:
        ignore = util.never
        dirignore = util.never
    elif unknown:
        ignore = self._ignore
        dirignore = self._dirignore
    else:
        # if not unknown and not ignored, drop dir recursion and step 2
        ignore = util.always
        dirignore = util.always

    # local aliases to avoid repeated attribute lookups in the hot loop
    matchfn = match.matchfn
    matchalways = match.always()
    matchtdir = match.traversedir
    dmap = self._map
    listdir = util.listdir
    lstat = os.lstat
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self._join

    exact = skipstep3 = False
    if match.isexact():  # match.exact
        exact = True
        dirignore = util.always  # skip step 2
    elif match.prefix():  # match.match, no patterns
        skipstep3 = True

    if not exact and self._checkcase:
        normalize = self._normalize
        normalizefile = self._normalizefile
        skipstep3 = False
    else:
        normalize = self._normalize
        normalizefile = None

    # step 1: find all explicit files
    results, work, dirsnotfound = self._walkexplicit(match, subrepos)
    if matchtdir:
        for d in work:
            matchtdir(d[0])
        for d in dirsnotfound:
            matchtdir(d)

    skipstep3 = skipstep3 and not (work or dirsnotfound)
    work = [d for d in work if not dirignore(d[0])]

    # step 2: visit subdirectories
    def traverse(work, alreadynormed):
        # iterative depth-first traversal; `work` is the stack of
        # directories still to list
        wadd = work.append
        while work:
            tracing.counter('dirstate.walk work', len(work))
            nd = work.pop()
            visitentries = match.visitchildrenset(nd)
            if not visitentries:
                continue
            if visitentries == b'this' or visitentries == b'all':
                visitentries = None
            skip = None
            if nd != b'':
                skip = b'.hg'
            try:
                with tracing.log('dirstate.walk.traverse listdir %s', nd):
                    entries = listdir(join(nd), stat=True, skip=skip)
            except OSError as inst:
                if inst.errno in (errno.EACCES, errno.ENOENT):
                    match.bad(
                        self.pathto(nd), encoding.strtolocal(inst.strerror)
                    )
                    continue
                raise
            for f, kind, st in entries:
                # Some matchers may return files in the visitentries set,
                # instead of 'this', if the matcher explicitly mentions them
                # and is not an exactmatcher. This is acceptable; we do not
                # make any hard assumptions about file-or-directory below
                # based on the presence of `f` in visitentries. If
                # visitchildrenset returned a set, we can always skip the
                # entries *not* in the set it provided regardless of whether
                # they're actually a file or a directory.
                if visitentries and f not in visitentries:
                    continue
                if normalizefile:
                    # even though f might be a directory, we're only
                    # interested in comparing it to files currently in the
                    # dmap -- therefore normalizefile is enough
                    nf = normalizefile(
                        nd and (nd + b"/" + f) or f, True, True
                    )
                else:
                    nf = nd and (nd + b"/" + f) or f
                if nf not in results:
                    if kind == dirkind:
                        if not ignore(nf):
                            if matchtdir:
                                matchtdir(nf)
                            wadd(nf)
                        if nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None
                    elif kind == regkind or kind == lnkkind:
                        if nf in dmap:
                            if matchalways or matchfn(nf):
                                results[nf] = st
                        elif (matchalways or matchfn(nf)) and not ignore(
                            nf
                        ):
                            # unknown file -- normalize if necessary
                            if not alreadynormed:
                                nf = normalize(nf, False, True)
                            results[nf] = st
                    elif nf in dmap and (matchalways or matchfn(nf)):
                        results[nf] = None

    # work holds (normalized, as-given) directory pairs from step 1
    for nd, d in work:
        # alreadynormed means that processwork doesn't have to do any
        # expensive directory normalization
        alreadynormed = not normalize or nd == d
        traverse([d], alreadynormed)

    # drop the sentinels installed by _walkexplicit
    for s in subrepos:
        del results[s]
    del results[b'.hg']

    # step 3: visit remaining files from dmap
    if not skipstep3 and not exact:
        # If a dmap file is not in results yet, it was either
        # a) not matching matchfn b) ignored, c) missing, or d) under a
        # symlink directory.
        if not results and matchalways:
            visit = [f for f in dmap]
        else:
            visit = [f for f in dmap if f not in results and matchfn(f)]
        visit.sort()

        if unknown:
            # unknown == True means we walked all dirs under the roots
            # that wasn't ignored, and everything that matched was stat'ed
            # and is already in results.
            # The rest must thus be ignored or under a symlink.
            audit_path = pathutil.pathauditor(self._root, cached=True)

            for nf in iter(visit):
                # If a stat for the same file was already added with a
                # different case, don't add one for this, since that would
                # make it appear as if the file exists under both names
                # on disk.
                if (
                    normalizefile
                    and normalizefile(nf, True, True) in results
                ):
                    results[nf] = None
                # Report ignored items in the dmap as long as they are not
                # under a symlink directory.
                elif audit_path.check(nf):
                    try:
                        results[nf] = lstat(join(nf))
                        # file was just ignored, no links, and exists
                    except OSError:
                        # file doesn't exist
                        results[nf] = None
                else:
                    # It's either missing or under a symlink directory
                    # which we in this case report as missing
                    results[nf] = None
        else:
            # We may not have walked the full directory tree above,
            # so stat and check everything we missed.
            iv = iter(visit)
            for st in util.statfiles([join(i) for i in visit]):
                results[next(iv)] = st
    return results
1465
1472
def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
    """Compute status via the Rust extension.

    Delegates the walk and comparison to ``rustmod.status`` and
    translates its raw result into ``(lookup, scmutil.status)``,
    forwarding traversal callbacks, warnings, and bad-file reports
    along the way.
    """
    # Force Rayon (Rust parallelism library) to respect the number of
    # workers. This is a temporary workaround until Rust code knows
    # how to read the config file.
    cpu_count = self._ui.configint(b"worker", b"numcpus")
    if cpu_count is not None:
        encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % cpu_count)

    if not self._ui.configbool(b"worker", b"enabled", True):
        # workers disabled: pin Rayon to a single thread
        encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

    (
        lookup,
        modified,
        added,
        removed,
        deleted,
        clean,
        ignored,
        unknown,
        warnings,
        bad,
        traversed,
        dirty,
    ) = rustmod.status(
        self._map._rustmap,
        matcher,
        self._rootdir,
        self._ignorefiles(),
        self._checkexec,
        self._lastnormaltime,
        bool(list_clean),
        bool(list_ignored),
        bool(list_unknown),
        bool(matcher.traversedir),
    )

    self._dirty |= dirty

    if matcher.traversedir:
        for visited_dir in traversed:
            matcher.traversedir(visited_dir)

    if self._ui.warn:
        for warning in warnings:
            if isinstance(warning, tuple):
                # (file, syntax) pair: a pattern file contained a bad line
                file_path, syntax = warning
                self._ui.warn(
                    _(b"%s: ignoring invalid syntax '%s'\n")
                    % (file_path, syntax)
                )
            else:
                # bare path: the pattern file could not be read at all
                template = _(b"skipping unreadable pattern file '%s': %s\n")
                canonical = pathutil.canonpath(
                    self._rootdir, self._rootdir, warning
                )
                self._ui.warn(
                    template % (canonical, b"No such file or directory")
                )

    for fn, message in bad:
        matcher.bad(fn, encoding.strtolocal(message))

    status = scmutil.status(
        modified=modified,
        added=added,
        removed=removed,
        deleted=deleted,
        unknown=unknown,
        ignored=ignored,
        clean=clean,
    )
    return (lookup, status)
1544
1551
1545 def status(self, match, subrepos, ignored, clean, unknown):
1552 def status(self, match, subrepos, ignored, clean, unknown):
1546 """Determine the status of the working copy relative to the
1553 """Determine the status of the working copy relative to the
1547 dirstate and return a pair of (unsure, status), where status is of type
1554 dirstate and return a pair of (unsure, status), where status is of type
1548 scmutil.status and:
1555 scmutil.status and:
1549
1556
1550 unsure:
1557 unsure:
1551 files that might have been modified since the dirstate was
1558 files that might have been modified since the dirstate was
1552 written, but need to be read to be sure (size is the same
1559 written, but need to be read to be sure (size is the same
1553 but mtime differs)
1560 but mtime differs)
1554 status.modified:
1561 status.modified:
1555 files that have definitely been modified since the dirstate
1562 files that have definitely been modified since the dirstate
1556 was written (different size or mode)
1563 was written (different size or mode)
1557 status.clean:
1564 status.clean:
1558 files that have definitely not been modified since the
1565 files that have definitely not been modified since the
1559 dirstate was written
1566 dirstate was written
1560 """
1567 """
1561 listignored, listclean, listunknown = ignored, clean, unknown
1568 listignored, listclean, listunknown = ignored, clean, unknown
1562 lookup, modified, added, unknown, ignored = [], [], [], [], []
1569 lookup, modified, added, unknown, ignored = [], [], [], [], []
1563 removed, deleted, clean = [], [], []
1570 removed, deleted, clean = [], [], []
1564
1571
1565 dmap = self._map
1572 dmap = self._map
1566 dmap.preload()
1573 dmap.preload()
1567
1574
1568 use_rust = True
1575 use_rust = True
1569
1576
1570 allowed_matchers = (
1577 allowed_matchers = (
1571 matchmod.alwaysmatcher,
1578 matchmod.alwaysmatcher,
1572 matchmod.exactmatcher,
1579 matchmod.exactmatcher,
1573 matchmod.includematcher,
1580 matchmod.includematcher,
1574 )
1581 )
1575
1582
1576 if rustmod is None:
1583 if rustmod is None:
1577 use_rust = False
1584 use_rust = False
1578 elif self._checkcase:
1585 elif self._checkcase:
1579 # Case-insensitive filesystems are not handled yet
1586 # Case-insensitive filesystems are not handled yet
1580 use_rust = False
1587 use_rust = False
1581 elif subrepos:
1588 elif subrepos:
1582 use_rust = False
1589 use_rust = False
1583 elif sparse.enabled:
1590 elif sparse.enabled:
1584 use_rust = False
1591 use_rust = False
1585 elif not isinstance(match, allowed_matchers):
1592 elif not isinstance(match, allowed_matchers):
1586 # Some matchers have yet to be implemented
1593 # Some matchers have yet to be implemented
1587 use_rust = False
1594 use_rust = False
1588
1595
1589 if use_rust:
1596 if use_rust:
1590 try:
1597 try:
1591 return self._rust_status(
1598 return self._rust_status(
1592 match, listclean, listignored, listunknown
1599 match, listclean, listignored, listunknown
1593 )
1600 )
1594 except rustmod.FallbackError:
1601 except rustmod.FallbackError:
1595 pass
1602 pass
1596
1603
1597 def noop(f):
1604 def noop(f):
1598 pass
1605 pass
1599
1606
1600 dcontains = dmap.__contains__
1607 dcontains = dmap.__contains__
1601 dget = dmap.__getitem__
1608 dget = dmap.__getitem__
1602 ladd = lookup.append # aka "unsure"
1609 ladd = lookup.append # aka "unsure"
1603 madd = modified.append
1610 madd = modified.append
1604 aadd = added.append
1611 aadd = added.append
1605 uadd = unknown.append if listunknown else noop
1612 uadd = unknown.append if listunknown else noop
1606 iadd = ignored.append if listignored else noop
1613 iadd = ignored.append if listignored else noop
1607 radd = removed.append
1614 radd = removed.append
1608 dadd = deleted.append
1615 dadd = deleted.append
1609 cadd = clean.append if listclean else noop
1616 cadd = clean.append if listclean else noop
1610 mexact = match.exact
1617 mexact = match.exact
1611 dirignore = self._dirignore
1618 dirignore = self._dirignore
1612 checkexec = self._checkexec
1619 checkexec = self._checkexec
1613 copymap = self._map.copymap
1620 copymap = self._map.copymap
1614 lastnormaltime = self._lastnormaltime
1621 lastnormaltime = self._lastnormaltime
1615
1622
1616 # We need to do full walks when either
1623 # We need to do full walks when either
1617 # - we're listing all clean files, or
1624 # - we're listing all clean files, or
1618 # - match.traversedir does something, because match.traversedir should
1625 # - match.traversedir does something, because match.traversedir should
1619 # be called for every dir in the working dir
1626 # be called for every dir in the working dir
1620 full = listclean or match.traversedir is not None
1627 full = listclean or match.traversedir is not None
1621 for fn, st in pycompat.iteritems(
1628 for fn, st in pycompat.iteritems(
1622 self.walk(match, subrepos, listunknown, listignored, full=full)
1629 self.walk(match, subrepos, listunknown, listignored, full=full)
1623 ):
1630 ):
1624 if not dcontains(fn):
1631 if not dcontains(fn):
1625 if (listignored or mexact(fn)) and dirignore(fn):
1632 if (listignored or mexact(fn)) and dirignore(fn):
1626 if listignored:
1633 if listignored:
1627 iadd(fn)
1634 iadd(fn)
1628 else:
1635 else:
1629 uadd(fn)
1636 uadd(fn)
1630 continue
1637 continue
1631
1638
1632 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1639 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1633 # written like that for performance reasons. dmap[fn] is not a
1640 # written like that for performance reasons. dmap[fn] is not a
1634 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1641 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1635 # opcode has fast paths when the value to be unpacked is a tuple or
1642 # opcode has fast paths when the value to be unpacked is a tuple or
1636 # a list, but falls back to creating a full-fledged iterator in
1643 # a list, but falls back to creating a full-fledged iterator in
1637 # general. That is much slower than simply accessing and storing the
1644 # general. That is much slower than simply accessing and storing the
1638 # tuple members one by one.
1645 # tuple members one by one.
1639 t = dget(fn)
1646 t = dget(fn)
1640 mode = t.mode
1647 mode = t.mode
1641 size = t.size
1648 size = t.size
1642 time = t.mtime
1649 time = t.mtime
1643
1650
1644 if not st and t.tracked:
1651 if not st and t.tracked:
1645 dadd(fn)
1652 dadd(fn)
1646 elif t.merged:
1653 elif t.merged:
1647 madd(fn)
1654 madd(fn)
1648 elif t.added:
1655 elif t.added:
1649 aadd(fn)
1656 aadd(fn)
1650 elif t.removed:
1657 elif t.removed:
1651 radd(fn)
1658 radd(fn)
1652 elif t.tracked:
1659 elif t.tracked:
1653 if (
1660 if (
1654 size >= 0
1661 size >= 0
1655 and (
1662 and (
1656 (size != st.st_size and size != st.st_size & _rangemask)
1663 (size != st.st_size and size != st.st_size & _rangemask)
1657 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1664 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1658 )
1665 )
1659 or t.from_p2
1666 or t.from_p2
1660 or fn in copymap
1667 or fn in copymap
1661 ):
1668 ):
1662 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1669 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1663 # issue6456: Size returned may be longer due to
1670 # issue6456: Size returned may be longer due to
1664 # encryption on EXT-4 fscrypt, undecided.
1671 # encryption on EXT-4 fscrypt, undecided.
1665 ladd(fn)
1672 ladd(fn)
1666 else:
1673 else:
1667 madd(fn)
1674 madd(fn)
1668 elif (
1675 elif (
1669 time != st[stat.ST_MTIME]
1676 time != st[stat.ST_MTIME]
1670 and time != st[stat.ST_MTIME] & _rangemask
1677 and time != st[stat.ST_MTIME] & _rangemask
1671 ):
1678 ):
1672 ladd(fn)
1679 ladd(fn)
1673 elif st[stat.ST_MTIME] == lastnormaltime:
1680 elif st[stat.ST_MTIME] == lastnormaltime:
1674 # fn may have just been marked as normal and it may have
1681 # fn may have just been marked as normal and it may have
1675 # changed in the same second without changing its size.
1682 # changed in the same second without changing its size.
1676 # This can happen if we quickly do multiple commits.
1683 # This can happen if we quickly do multiple commits.
1677 # Force lookup, so we don't miss such a racy file change.
1684 # Force lookup, so we don't miss such a racy file change.
1678 ladd(fn)
1685 ladd(fn)
1679 elif listclean:
1686 elif listclean:
1680 cadd(fn)
1687 cadd(fn)
1681 status = scmutil.status(
1688 status = scmutil.status(
1682 modified, added, removed, deleted, unknown, ignored, clean
1689 modified, added, removed, deleted, unknown, ignored, clean
1683 )
1690 )
1684 return (lookup, status)
1691 return (lookup, status)
1685
1692
1686 def matches(self, match):
1693 def matches(self, match):
1687 """
1694 """
1688 return files in the dirstate (in whatever state) filtered by match
1695 return files in the dirstate (in whatever state) filtered by match
1689 """
1696 """
1690 dmap = self._map
1697 dmap = self._map
1691 if rustmod is not None:
1698 if rustmod is not None:
1692 dmap = self._map._rustmap
1699 dmap = self._map._rustmap
1693
1700
1694 if match.always():
1701 if match.always():
1695 return dmap.keys()
1702 return dmap.keys()
1696 files = match.files()
1703 files = match.files()
1697 if match.isexact():
1704 if match.isexact():
1698 # fast path -- filter the other way around, since typically files is
1705 # fast path -- filter the other way around, since typically files is
1699 # much smaller than dmap
1706 # much smaller than dmap
1700 return [f for f in files if f in dmap]
1707 return [f for f in files if f in dmap]
1701 if match.prefix() and all(fn in dmap for fn in files):
1708 if match.prefix() and all(fn in dmap for fn in files):
1702 # fast path -- all the values are known to be files, so just return
1709 # fast path -- all the values are known to be files, so just return
1703 # that
1710 # that
1704 return list(files)
1711 return list(files)
1705 return [f for f in dmap if match(f)]
1712 return [f for f in dmap if match(f)]
1706
1713
1707 def _actualfilename(self, tr):
1714 def _actualfilename(self, tr):
1708 if tr:
1715 if tr:
1709 return self._pendingfilename
1716 return self._pendingfilename
1710 else:
1717 else:
1711 return self._filename
1718 return self._filename
1712
1719
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` may be None (no transaction); ``backupname`` is the
        opener-relative path to write the backup to and must differ from the
        live dirstate file name.
        '''
        # Pick the pending file when inside a transaction, the plain
        # dirstate file otherwise.
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                # atomictemp/checkambig guard against partial writes and
                # mtime ambiguity on the dirstate file.
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # Remove any stale backup before creating the new one.
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1751
1758
    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file

        Replaces the current dirstate file with the backup created by
        ``savebackup`` and discards any in-memory dirstate changes.
        '''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            # Backup is a hardlink to (or otherwise the same file as) the
            # live dirstate: nothing to restore, just drop the backup name.
            o.unlink(backupname)
        else:
            # checkambig avoids mtime-based cache invalidation problems on
            # the restored dirstate file.
            o.rename(backupname, filename, checkambig=True)
1763
1770
    def clearbackup(self, tr, backupname):
        '''Clear backup file

        ``tr`` is accepted for interface symmetry with ``savebackup`` and
        ``restorebackup`` but is not consulted here.
        '''
        self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now