##// END OF EJS Templates
dirstate: introduce a `set_clean` method...
marmoute -
r48504:8a50fb07 default
parent child Browse files
Show More
@@ -1,1679 +1,1686 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
# C or pure-Python parser implementation, selected by policy.
parsers = policy.importmod('parsers')
# Rust dirstate implementation, if the Rust extensions are available.
rustmod = policy.importrust('dirstate')

# dirstate-v2 support currently requires the Rust extensions.
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# local aliases for frequently used cache helpers
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """A `filecache` specialized for files that live under `.hg/`."""

    def join(self, obj, fname):
        # Paths are resolved through the repository's `.hg` opener.
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """A `filecache` specialized for files at the repository root."""

    def join(self, obj, fname):
        # Resolve relative to the working-directory root.
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator enforcing that `func` runs inside a `parentchange` context.

    Raises `error.ProgrammingError` when the dirstate is not currently in
    the middle of a parent change.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator enforcing that `func` runs outside any `parentchange` context.

    Raises `error.ProgrammingError` when a parent change is in progress.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
95
95
96
96
@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state diverges from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of currently open parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        # original parents, remembered across a parent change
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139
139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching self._pl forces the parents to be read from disk now
        self._pl
146
146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163
163
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        # non-zero while inside one or more parentchange() contexts
        return self._parentwriters > 0
169
169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # built lazily on first access, then cached by propertycache
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181
181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194
194
    @repocache(b'branch')
    def _branch(self):
        """Current branch name, read from `.hg/branch` (b"default" if unset)."""
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            # a missing branch file simply means the default branch
            if inst.errno != errno.ENOENT:
                raise
            return b"default"
203
203
    @property
    def _pl(self):
        # (p1, p2) tuple of the current dirstate parents
        return self._map.parents()

    def hasdir(self, d):
        # delegate to the map's tracked-directory index
        return self._map.hastrackeddir(d)
210
210
    @rootcache(b'.hgignore')
    def _ignore(self):
        """Matcher built from all configured ignore files (never-match if none)."""
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        # each ignore file is pulled in via an 'include:' pattern
        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' on a non-'/' OS
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # does the filesystem at the repository root support symlinks?
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # does the filesystem at the repository root support the exec bit?
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        # True when the filesystem is case-insensitive
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240
240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a path to its flags: b'l', b'x' or b''.

        When the filesystem supports both symlinks and the exec bit, flags
        are read straight from lstat(); otherwise `buildfallback()` supplies
        flags for whichever bit the filesystem cannot represent.
        """
        if self._checklink and self._checkexec:

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    # missing/unreadable file: report no flags
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:

            def f(x):
                # symlinks are real; the exec bit comes from the fallback
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:

            def f(x):
                # the exec bit is real; the symlink flag comes from the fallback
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither bit is supported: rely entirely on the fallback
            return fallback
280
280
    @propertycache
    def _cwd(self):
        """Current working directory, honoring the ui.forcecwd override."""
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
288
288
    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            # at the repo root, the relative path is empty
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # inside the repo: return the path relative to the root
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
308
308
    def pathto(self, f, cwd=None):
        """Return repo-file `f` as a path relative to `cwd` (for display)."""
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            # honor ui.slash: always display forward slashes
            return util.pconvert(path)
        return path
316
316
    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. we should
        consider migrating all user of this to going through the dirstate entry
        instead.
        """
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        # unknown files report b'?'
        return b'?'
335
335
    def __contains__(self, key):
        # membership means the file has a dirstate entry
        return key in self._map

    def __iter__(self):
        # iterate filenames in sorted order
        return iter(sorted(self._map))

    def items(self):
        return pycompat.iteritems(self._map)

    # Python 2 spelling kept as an alias
    iteritems = items

    def directories(self):
        return self._map.directories()
349
349
    def parents(self):
        """Both dirstate parents, run through the validate callback."""
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # a non-null second parent means we are merging
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        # branch name converted to the local encoding
        return encoding.tolocal(self._branch)
366
366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for later comparison
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # leaving a merge: entries referencing p2 need fixing up
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414
414
    def setbranch(self, branch):
        """Record `branch` (local encoding) as the current branch on disk."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # discard the partially written atomictemp file before re-raising
            f.discard()
            raise
430
430
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so they are recomputed on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
446
446
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            # self-copies are meaningless; ignore them
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            # only record an update if there actually was a copy to drop
            self._updatedfiles.add(dest)

    def copied(self, file):
        # copy source of `file`, or None if it is not recorded as copied
        return self._map.copymap.get(file, None)

    def copies(self):
        # the full {dest: source} copy mapping
        return self._map.copymap
464
464
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True the file was previously untracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            # previously unknown: add it
            self._add(filename)
            return True
        elif not entry.tracked:
            # known but untracked (e.g. removed): resurrect it
            self.normallookup(filename)
            return True
        # already tracked: nothing to do
        return False
482
482
    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True the file was previously tracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            # not tracked to begin with
            return False
        elif entry.added:
            # never committed: drop it entirely
            self._drop(filename)
            return True
        else:
            # present in a parent: mark it removed
            self._remove(filename)
            return True
501
501
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        self._updatedfiles.add(filename)
        # delegate to `normal`, forwarding any pre-computed stat data
        self.normal(filename, parentfiledata=parentfiledata)
508
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent after
        a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            pass
        else:
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
570
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes to keep track
        of what is the file situation in regards to the working copy and its
        parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.

        :filename: path of the file, relative to the repository root
        :wc_tracked: whether the working copy tracks the file
        :p1_tracked: whether the first parent tracks the file
        :parentfiledata: optional ``(mode, size, mtime)`` of the clean file;
            looked up on disk when omitted and actually needed
        """
        # `merged` and `clean_p1`/`clean_p2` describe incompatible situations
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        self._updatedfiles.add(filename)

        # stat data only matters for a file that is clean relative to p1 and
        # not in any special merge state
        need_parent_file_data = (
            not (possibly_dirty or clean_p2 or merged)
            and wc_tracked
            and p1_tracked
        )

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        if need_parent_file_data:
            if parentfiledata is None:
                parentfiledata = self._get_filedata(filename)
            mtime = parentfiledata[2]

            if mtime > self._lastnormaltime:
                # Remember the most recent modification timeslot for
                # status(), to make sure we won't miss future
                # size-preserving file content modifications that happen
                # within the same timeslot.
                self._lastnormaltime = mtime

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_tracked=p2_tracked,
            merged=merged,
            clean_p1=clean_p1,
            clean_p2=clean_p2,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
639
646
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """Add or update the dirstate entry for ``f``.

        When the file becomes (re)tracked — ``added`` is True, or an existing
        entry is in "removed" state — the name is validated first: raises
        ``error.Abort`` if ``f`` is already tracked as a directory, or if a
        tracked file shadows one of ``f``'s parent directories.  The actual
        record keeping is delegated to ``self._map.addfile``.
        """
        entry = self._map.get(f)
        if added or entry is not None and entry.removed:
            # newly tracked (added or resurrected): validate the name
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
679
686
680 def _get_filedata(self, filename):
687 def _get_filedata(self, filename):
681 """returns"""
688 """returns"""
682 s = os.lstat(self._join(filename))
689 s = os.lstat(self._join(filename))
683 mode = s.st_mode
690 mode = s.st_mode
684 size = s.st_size
691 size = s.st_size
685 mtime = s[stat.ST_MTIME]
692 mtime = s[stat.ST_MTIME]
686 return (mode, size, mtime)
693 return (mode, size, mtime)
687
694
    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # no cached stat data supplied: look the file up on disk
            (mode, size, mtime) = self._get_filedata(f)
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        # a clean file cannot be a copy, nor "non-normal"
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
711
718
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with a a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    if source is not None:
                        # re-record the copy information dropped by the
                        # merge/otherparent call above
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in the wanted merge state: nothing to do
                    return
        # record the file as tracked but of unknown cleanliness so the next
        # status() will stat it again
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
735
742
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if not self.in_merge:
            # the "other parent" state only makes sense during a merge
            msg = _(b"setting %r to other parent only allowed in merges") % f
            raise error.Abort(msg)
        entry = self._map.get(f)
        if entry is not None and entry.tracked:
            # merge-like
            self._addpath(f, merged=True)
        else:
            # add-like
            self._addpath(f, from_p2=True)
        # a file taken from the other parent carries no copy metadata
        self._map.copymap.pop(f, None)
749
756
750 def add(self, f):
757 def add(self, f):
751 '''Mark a file added.'''
758 '''Mark a file added.'''
752 if not self.pendingparentchange():
759 if not self.pendingparentchange():
753 util.nouideprecwarn(
760 util.nouideprecwarn(
754 b"do not use `add` outside of update/merge context."
761 b"do not use `add` outside of update/merge context."
755 b" Use `set_tracked`",
762 b" Use `set_tracked`",
756 b'6.0',
763 b'6.0',
757 stacklevel=2,
764 stacklevel=2,
758 )
765 )
759 self._add(f)
766 self._add(f)
760
767
    def _add(self, filename):
        """internal function to mark a file as added"""
        self._addpath(filename, added=True)
        # an added file cannot carry copy information from a previous life
        self._map.copymap.pop(filename, None)
765
772
766 def remove(self, f):
773 def remove(self, f):
767 '''Mark a file removed'''
774 '''Mark a file removed'''
768 if self.pendingparentchange():
775 if self.pendingparentchange():
769 util.nouideprecwarn(
776 util.nouideprecwarn(
770 b"do not use `remove` insde of update/merge context."
777 b"do not use `remove` insde of update/merge context."
771 b" Use `update_file` or `update_file_p1`",
778 b" Use `update_file` or `update_file_p1`",
772 b'6.0',
779 b'6.0',
773 stacklevel=2,
780 stacklevel=2,
774 )
781 )
775 else:
782 else:
776 util.nouideprecwarn(
783 util.nouideprecwarn(
777 b"do not use `remove` outside of update/merge context."
784 b"do not use `remove` outside of update/merge context."
778 b" Use `set_untracked`",
785 b" Use `set_untracked`",
779 b'6.0',
786 b'6.0',
780 stacklevel=2,
787 stacklevel=2,
781 )
788 )
782 self._remove(f)
789 self._remove(f)
783
790
    def _remove(self, filename):
        """internal function to mark a file removed"""
        self._dirty = True
        self._updatedfiles.add(filename)
        # the map picks the right "removed" flavor depending on whether a
        # merge is in progress
        self._map.removefile(filename, in_merge=self.in_merge)
789
796
790 def merge(self, f):
797 def merge(self, f):
791 '''Mark a file merged.'''
798 '''Mark a file merged.'''
792 if not self.in_merge:
799 if not self.in_merge:
793 return self.normallookup(f)
800 return self.normallookup(f)
794 return self.otherparent(f)
801 return self.otherparent(f)
795
802
796 def drop(self, f):
803 def drop(self, f):
797 '''Drop a file from the dirstate'''
804 '''Drop a file from the dirstate'''
798 if not self.pendingparentchange():
805 if not self.pendingparentchange():
799 util.nouideprecwarn(
806 util.nouideprecwarn(
800 b"do not use `drop` outside of update/merge context."
807 b"do not use `drop` outside of update/merge context."
801 b" Use `set_untracked`",
808 b" Use `set_untracked`",
802 b'6.0',
809 b'6.0',
803 stacklevel=2,
810 stacklevel=2,
804 )
811 )
805 self._drop(f)
812 self._drop(f)
806
813
    def _drop(self, filename):
        """internal function to drop a file from the dirstate"""
        # dropfile returns True only when an entry was actually removed
        if self._map.dropfile(filename):
            self._dirty = True
            self._updatedfiles.add(filename)
            self._map.copymap.pop(filename, None)
813
820
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Discover the canonical case of ``path`` and cache it.

        ``normed`` is the case-normalized form of ``path`` and ``storemap``
        the fold-map cache (file or directory map) to record the result in.
        ``exists`` may be passed in by the caller to save an ``lexists``
        call; when ``None`` it is computed here.  Note that only existing
        paths are cached in ``storemap``.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded
839
846
840 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
847 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
841 normed = util.normcase(path)
848 normed = util.normcase(path)
842 folded = self._map.filefoldmap.get(normed, None)
849 folded = self._map.filefoldmap.get(normed, None)
843 if folded is None:
850 if folded is None:
844 if isknown:
851 if isknown:
845 folded = path
852 folded = path
846 else:
853 else:
847 folded = self._discoverpath(
854 folded = self._discoverpath(
848 path, normed, ignoremissing, exists, self._map.filefoldmap
855 path, normed, ignoremissing, exists, self._map.filefoldmap
849 )
856 )
850 return folded
857 return folded
851
858
852 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
859 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
853 normed = util.normcase(path)
860 normed = util.normcase(path)
854 folded = self._map.filefoldmap.get(normed, None)
861 folded = self._map.filefoldmap.get(normed, None)
855 if folded is None:
862 if folded is None:
856 folded = self._map.dirfoldmap.get(normed, None)
863 folded = self._map.dirfoldmap.get(normed, None)
857 if folded is None:
864 if folded is None:
858 if isknown:
865 if isknown:
859 folded = path
866 folded = path
860 else:
867 else:
861 # store discovered result in dirfoldmap so that future
868 # store discovered result in dirfoldmap so that future
862 # normalizefile calls don't start matching directories
869 # normalizefile calls don't start matching directories
863 folded = self._discoverpath(
870 folded = self._discoverpath(
864 path, normed, ignoremissing, exists, self._map.dirfoldmap
871 path, normed, ignoremissing, exists, self._map.dirfoldmap
865 )
872 )
866 return folded
873 return folded
867
874
868 def normalize(self, path, isknown=False, ignoremissing=False):
875 def normalize(self, path, isknown=False, ignoremissing=False):
869 """
876 """
870 normalize the case of a pathname when on a casefolding filesystem
877 normalize the case of a pathname when on a casefolding filesystem
871
878
872 isknown specifies whether the filename came from walking the
879 isknown specifies whether the filename came from walking the
873 disk, to avoid extra filesystem access.
880 disk, to avoid extra filesystem access.
874
881
875 If ignoremissing is True, missing path are returned
882 If ignoremissing is True, missing path are returned
876 unchanged. Otherwise, we try harder to normalize possibly
883 unchanged. Otherwise, we try harder to normalize possibly
877 existing path components.
884 existing path components.
878
885
879 The normalized case is determined based on the following precedence:
886 The normalized case is determined based on the following precedence:
880
887
881 - version of name already stored in the dirstate
888 - version of name already stored in the dirstate
882 - version of name stored on disk
889 - version of name stored on disk
883 - version provided via command arguments
890 - version provided via command arguments
884 """
891 """
885
892
886 if self._checkcase:
893 if self._checkcase:
887 return self._normalize(path, isknown, ignoremissing)
894 return self._normalize(path, isknown, ignoremissing)
888 return path
895 return path
889
896
    def clear(self):
        """Forget every entry, leaving the dirstate dirty (pending write)."""
        self._map.clear()
        # all 'normal' bookkeeping is invalidated along with the entries
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True
895
902
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to describe ``parent``.

        When ``changedfiles`` is None the entire dirstate is rebuilt from
        ``allfiles``; otherwise only the changed files are re-looked-up (if
        still present in ``allfiles``) or dropped.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the rebuild
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            # remember the pre-rebuild parents so that the parent-change
            # callbacks can fire at write time
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True
929
936
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
937
944
    def write(self, tr):
        """Write the dirstate to disk, or schedule the write on ``tr``.

        No-op when nothing changed in memory.  With a transaction, the
        actual write is delayed via a file generator; without one it is
        performed immediately.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )
            return

        # no transaction: write synchronously, guarding against ambiguous
        # mtimes with checkambig
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
969
976
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
980
987
    def _writedirstate(self, tr, st):
        """Serialize the dirstate map to file object ``st``.

        Also fires the registered parent-change callbacks (when the parents
        actually changed) and honors the 'debug.dirstate.delaywrite' test
        knob before delegating to ``self._map.write``.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = 0
        self._dirty = False
1014
1021
1015 def _dirignore(self, f):
1022 def _dirignore(self, f):
1016 if self._ignore(f):
1023 if self._ignore(f):
1017 return True
1024 return True
1018 for p in pathutil.finddirs(f):
1025 for p in pathutil.finddirs(f):
1019 if self._ignore(p):
1026 if self._ignore(p):
1020 return True
1027 return True
1021 return False
1028 return False
1022
1029
1023 def _ignorefiles(self):
1030 def _ignorefiles(self):
1024 files = []
1031 files = []
1025 if os.path.exists(self._join(b'.hgignore')):
1032 if os.path.exists(self._join(b'.hgignore')):
1026 files.append(self._join(b'.hgignore'))
1033 files.append(self._join(b'.hgignore'))
1027 for name, path in self._ui.configitems(b"ui"):
1034 for name, path in self._ui.configitems(b"ui"):
1028 if name == b'ignore' or name.startswith(b'ignore.'):
1035 if name == b'ignore' or name.startswith(b'ignore.'):
1029 # we need to use os.path.join here rather than self._join
1036 # we need to use os.path.join here rather than self._join
1030 # because path is arbitrary and user-specified
1037 # because path is arbitrary and user-specified
1031 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1038 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1032 return files
1039 return files
1033
1040
    def _ignorefileandline(self, f):
        """Return ``(file, lineno, line)`` of the first ignore pattern
        matching ``f``, or ``(None, -1, b"")`` when nothing matches.

        ``subinclude`` patterns are followed breadth-first; each pattern
        file is visited at most once.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced pattern file instead of matching
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
1055
1062
1056 def _walkexplicit(self, match, subrepos):
1063 def _walkexplicit(self, match, subrepos):
1057 """Get stat data about the files explicitly specified by match.
1064 """Get stat data about the files explicitly specified by match.
1058
1065
1059 Return a triple (results, dirsfound, dirsnotfound).
1066 Return a triple (results, dirsfound, dirsnotfound).
1060 - results is a mapping from filename to stat result. It also contains
1067 - results is a mapping from filename to stat result. It also contains
1061 listings mapping subrepos and .hg to None.
1068 listings mapping subrepos and .hg to None.
1062 - dirsfound is a list of files found to be directories.
1069 - dirsfound is a list of files found to be directories.
1063 - dirsnotfound is a list of files that the dirstate thinks are
1070 - dirsnotfound is a list of files that the dirstate thinks are
1064 directories and that were not found."""
1071 directories and that were not found."""
1065
1072
1066 def badtype(mode):
1073 def badtype(mode):
1067 kind = _(b'unknown')
1074 kind = _(b'unknown')
1068 if stat.S_ISCHR(mode):
1075 if stat.S_ISCHR(mode):
1069 kind = _(b'character device')
1076 kind = _(b'character device')
1070 elif stat.S_ISBLK(mode):
1077 elif stat.S_ISBLK(mode):
1071 kind = _(b'block device')
1078 kind = _(b'block device')
1072 elif stat.S_ISFIFO(mode):
1079 elif stat.S_ISFIFO(mode):
1073 kind = _(b'fifo')
1080 kind = _(b'fifo')
1074 elif stat.S_ISSOCK(mode):
1081 elif stat.S_ISSOCK(mode):
1075 kind = _(b'socket')
1082 kind = _(b'socket')
1076 elif stat.S_ISDIR(mode):
1083 elif stat.S_ISDIR(mode):
1077 kind = _(b'directory')
1084 kind = _(b'directory')
1078 return _(b'unsupported file type (type is %s)') % kind
1085 return _(b'unsupported file type (type is %s)') % kind
1079
1086
1080 badfn = match.bad
1087 badfn = match.bad
1081 dmap = self._map
1088 dmap = self._map
1082 lstat = os.lstat
1089 lstat = os.lstat
1083 getkind = stat.S_IFMT
1090 getkind = stat.S_IFMT
1084 dirkind = stat.S_IFDIR
1091 dirkind = stat.S_IFDIR
1085 regkind = stat.S_IFREG
1092 regkind = stat.S_IFREG
1086 lnkkind = stat.S_IFLNK
1093 lnkkind = stat.S_IFLNK
1087 join = self._join
1094 join = self._join
1088 dirsfound = []
1095 dirsfound = []
1089 foundadd = dirsfound.append
1096 foundadd = dirsfound.append
1090 dirsnotfound = []
1097 dirsnotfound = []
1091 notfoundadd = dirsnotfound.append
1098 notfoundadd = dirsnotfound.append
1092
1099
1093 if not match.isexact() and self._checkcase:
1100 if not match.isexact() and self._checkcase:
1094 normalize = self._normalize
1101 normalize = self._normalize
1095 else:
1102 else:
1096 normalize = None
1103 normalize = None
1097
1104
1098 files = sorted(match.files())
1105 files = sorted(match.files())
1099 subrepos.sort()
1106 subrepos.sort()
1100 i, j = 0, 0
1107 i, j = 0, 0
1101 while i < len(files) and j < len(subrepos):
1108 while i < len(files) and j < len(subrepos):
1102 subpath = subrepos[j] + b"/"
1109 subpath = subrepos[j] + b"/"
1103 if files[i] < subpath:
1110 if files[i] < subpath:
1104 i += 1
1111 i += 1
1105 continue
1112 continue
1106 while i < len(files) and files[i].startswith(subpath):
1113 while i < len(files) and files[i].startswith(subpath):
1107 del files[i]
1114 del files[i]
1108 j += 1
1115 j += 1
1109
1116
1110 if not files or b'' in files:
1117 if not files or b'' in files:
1111 files = [b'']
1118 files = [b'']
1112 # constructing the foldmap is expensive, so don't do it for the
1119 # constructing the foldmap is expensive, so don't do it for the
1113 # common case where files is ['']
1120 # common case where files is ['']
1114 normalize = None
1121 normalize = None
1115 results = dict.fromkeys(subrepos)
1122 results = dict.fromkeys(subrepos)
1116 results[b'.hg'] = None
1123 results[b'.hg'] = None
1117
1124
1118 for ff in files:
1125 for ff in files:
1119 if normalize:
1126 if normalize:
1120 nf = normalize(ff, False, True)
1127 nf = normalize(ff, False, True)
1121 else:
1128 else:
1122 nf = ff
1129 nf = ff
1123 if nf in results:
1130 if nf in results:
1124 continue
1131 continue
1125
1132
1126 try:
1133 try:
1127 st = lstat(join(nf))
1134 st = lstat(join(nf))
1128 kind = getkind(st.st_mode)
1135 kind = getkind(st.st_mode)
1129 if kind == dirkind:
1136 if kind == dirkind:
1130 if nf in dmap:
1137 if nf in dmap:
1131 # file replaced by dir on disk but still in dirstate
1138 # file replaced by dir on disk but still in dirstate
1132 results[nf] = None
1139 results[nf] = None
1133 foundadd((nf, ff))
1140 foundadd((nf, ff))
1134 elif kind == regkind or kind == lnkkind:
1141 elif kind == regkind or kind == lnkkind:
1135 results[nf] = st
1142 results[nf] = st
1136 else:
1143 else:
1137 badfn(ff, badtype(kind))
1144 badfn(ff, badtype(kind))
1138 if nf in dmap:
1145 if nf in dmap:
1139 results[nf] = None
1146 results[nf] = None
1140 except OSError as inst: # nf not found on disk - it is dirstate only
1147 except OSError as inst: # nf not found on disk - it is dirstate only
1141 if nf in dmap: # does it exactly match a missing file?
1148 if nf in dmap: # does it exactly match a missing file?
1142 results[nf] = None
1149 results[nf] = None
1143 else: # does it match a missing directory?
1150 else: # does it match a missing directory?
1144 if self._map.hasdir(nf):
1151 if self._map.hasdir(nf):
1145 notfoundadd(nf)
1152 notfoundadd(nf)
1146 else:
1153 else:
1147 badfn(ff, encoding.strtolocal(inst.strerror))
1154 badfn(ff, encoding.strtolocal(inst.strerror))
1148
1155
1149 # match.files() may contain explicitly-specified paths that shouldn't
1156 # match.files() may contain explicitly-specified paths that shouldn't
1150 # be taken; drop them from the list of files found. dirsfound/notfound
1157 # be taken; drop them from the list of files found. dirsfound/notfound
1151 # aren't filtered here because they will be tested later.
1158 # aren't filtered here because they will be tested later.
1152 if match.anypats():
1159 if match.anypats():
1153 for f in list(results):
1160 for f in list(results):
1154 if f == b'.hg' or f in subrepos:
1161 if f == b'.hg' or f in subrepos:
1155 # keep sentinel to disable further out-of-repo walks
1162 # keep sentinel to disable further out-of-repo walks
1156 continue
1163 continue
1157 if not match(f):
1164 if not match(f):
1158 del results[f]
1165 del results[f]
1159
1166
1160 # Case insensitive filesystems cannot rely on lstat() failing to detect
1167 # Case insensitive filesystems cannot rely on lstat() failing to detect
1161 # a case-only rename. Prune the stat object for any file that does not
1168 # a case-only rename. Prune the stat object for any file that does not
1162 # match the case in the filesystem, if there are multiple files that
1169 # match the case in the filesystem, if there are multiple files that
1163 # normalize to the same path.
1170 # normalize to the same path.
1164 if match.isexact() and self._checkcase:
1171 if match.isexact() and self._checkcase:
1165 normed = {}
1172 normed = {}
1166
1173
1167 for f, st in pycompat.iteritems(results):
1174 for f, st in pycompat.iteritems(results):
1168 if st is None:
1175 if st is None:
1169 continue
1176 continue
1170
1177
1171 nc = util.normcase(f)
1178 nc = util.normcase(f)
1172 paths = normed.get(nc)
1179 paths = normed.get(nc)
1173
1180
1174 if paths is None:
1181 if paths is None:
1175 paths = set()
1182 paths = set()
1176 normed[nc] = paths
1183 normed[nc] = paths
1177
1184
1178 paths.add(f)
1185 paths.add(f)
1179
1186
1180 for norm, paths in pycompat.iteritems(normed):
1187 for norm, paths in pycompat.iteritems(normed):
1181 if len(paths) > 1:
1188 if len(paths) > 1:
1182 for path in paths:
1189 for path in paths:
1183 folded = self._discoverpath(
1190 folded = self._discoverpath(
1184 path, norm, True, None, self._map.dirfoldmap
1191 path, norm, True, None, self._map.dirfoldmap
1185 )
1192 )
1186 if path != folded:
1193 if path != folded:
1187 results[path] = None
1194 results[path] = None
1188
1195
1189 return results, dirsfound, dirsnotfound
1196 return results, dirsfound, dirsnotfound
1190
1197
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        match: matcher object selecting which paths to report
        subrepos: list of subrepo paths; their entries are excluded from the
                  returned mapping
        unknown: if true, report files not tracked in the dirstate
        ignored: if true, report ignored files (and disable ignore filtering)
        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Pick the ignore predicates up front: listing ignored files means
        # nothing is filtered; listing unknown files uses the real ignore
        # rules; otherwise everything untracked is filtered out.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Hoist frequently-used lookups to locals for the hot traversal loop.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    # never descend into the .hg metadata directory
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # Drop the subrepo / .hg sentinels installed by _walkexplicit.
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1378
1385
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Delegate status computation to the Rust extension.

        Returns the same ``(lookup, scmutil.status)`` pair as the pure-Python
        path in ``status()``.  May raise ``rustmod.FallbackError`` (handled by
        the caller), in which case the Python implementation takes over.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            # Disabling workers means forcing Rayon down to one thread.
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # The Rust side may have updated the dirstate (e.g. mtime fixups);
        # propagate its dirtiness into our own flag.
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                # A tuple is (file_path, syntax) for a bad ignore pattern;
                # a bare item is an unreadable ignore file path.
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1457
1464
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # Keep the caller's flags under new names: the parameter names are
        # about to be reused for the result lists.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        # The Rust fast path only supports a subset of configurations;
        # fall back to the Python implementation otherwise.
        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        # Bind everything used in the hot loop below to locals; these run
        # once per file in the working directory.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # Not tracked: classify as ignored or unknown.
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                # tracked but missing on disk: deleted
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    # size matches but mtime differs: must read the file
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1598
1605
1599 def matches(self, match):
1606 def matches(self, match):
1600 """
1607 """
1601 return files in the dirstate (in whatever state) filtered by match
1608 return files in the dirstate (in whatever state) filtered by match
1602 """
1609 """
1603 dmap = self._map
1610 dmap = self._map
1604 if rustmod is not None:
1611 if rustmod is not None:
1605 dmap = self._map._rustmap
1612 dmap = self._map._rustmap
1606
1613
1607 if match.always():
1614 if match.always():
1608 return dmap.keys()
1615 return dmap.keys()
1609 files = match.files()
1616 files = match.files()
1610 if match.isexact():
1617 if match.isexact():
1611 # fast path -- filter the other way around, since typically files is
1618 # fast path -- filter the other way around, since typically files is
1612 # much smaller than dmap
1619 # much smaller than dmap
1613 return [f for f in files if f in dmap]
1620 return [f for f in files if f in dmap]
1614 if match.prefix() and all(fn in dmap for fn in files):
1621 if match.prefix() and all(fn in dmap for fn in files):
1615 # fast path -- all the values are known to be files, so just return
1622 # fast path -- all the values are known to be files, so just return
1616 # that
1623 # that
1617 return list(files)
1624 return list(files)
1618 return [f for f in dmap if match(f)]
1625 return [f for f in dmap if match(f)]
1619
1626
1620 def _actualfilename(self, tr):
1627 def _actualfilename(self, tr):
1621 if tr:
1628 if tr:
1622 return self._pendingfilename
1629 return self._pendingfilename
1623 else:
1630 else:
1624 return self._filename
1631 return self._filename
1625
1632
1626 def savebackup(self, tr, backupname):
1633 def savebackup(self, tr, backupname):
1627 '''Save current dirstate into backup file'''
1634 '''Save current dirstate into backup file'''
1628 filename = self._actualfilename(tr)
1635 filename = self._actualfilename(tr)
1629 assert backupname != filename
1636 assert backupname != filename
1630
1637
1631 # use '_writedirstate' instead of 'write' to write changes certainly,
1638 # use '_writedirstate' instead of 'write' to write changes certainly,
1632 # because the latter omits writing out if transaction is running.
1639 # because the latter omits writing out if transaction is running.
1633 # output file will be used to create backup of dirstate at this point.
1640 # output file will be used to create backup of dirstate at this point.
1634 if self._dirty or not self._opener.exists(filename):
1641 if self._dirty or not self._opener.exists(filename):
1635 self._writedirstate(
1642 self._writedirstate(
1636 tr,
1643 tr,
1637 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1644 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1638 )
1645 )
1639
1646
1640 if tr:
1647 if tr:
1641 # ensure that subsequent tr.writepending returns True for
1648 # ensure that subsequent tr.writepending returns True for
1642 # changes written out above, even if dirstate is never
1649 # changes written out above, even if dirstate is never
1643 # changed after this
1650 # changed after this
1644 tr.addfilegenerator(
1651 tr.addfilegenerator(
1645 b'dirstate',
1652 b'dirstate',
1646 (self._filename,),
1653 (self._filename,),
1647 lambda f: self._writedirstate(tr, f),
1654 lambda f: self._writedirstate(tr, f),
1648 location=b'plain',
1655 location=b'plain',
1649 )
1656 )
1650
1657
1651 # ensure that pending file written above is unlinked at
1658 # ensure that pending file written above is unlinked at
1652 # failure, even if tr.writepending isn't invoked until the
1659 # failure, even if tr.writepending isn't invoked until the
1653 # end of this transaction
1660 # end of this transaction
1654 tr.registertmp(filename, location=b'plain')
1661 tr.registertmp(filename, location=b'plain')
1655
1662
1656 self._opener.tryunlink(backupname)
1663 self._opener.tryunlink(backupname)
1657 # hardlink backup is okay because _writedirstate is always called
1664 # hardlink backup is okay because _writedirstate is always called
1658 # with an "atomictemp=True" file.
1665 # with an "atomictemp=True" file.
1659 util.copyfile(
1666 util.copyfile(
1660 self._opener.join(filename),
1667 self._opener.join(filename),
1661 self._opener.join(backupname),
1668 self._opener.join(backupname),
1662 hardlink=True,
1669 hardlink=True,
1663 )
1670 )
1664
1671
1665 def restorebackup(self, tr, backupname):
1672 def restorebackup(self, tr, backupname):
1666 '''Restore dirstate by backup file'''
1673 '''Restore dirstate by backup file'''
1667 # this "invalidate()" prevents "wlock.release()" from writing
1674 # this "invalidate()" prevents "wlock.release()" from writing
1668 # changes of dirstate out after restoring from backup file
1675 # changes of dirstate out after restoring from backup file
1669 self.invalidate()
1676 self.invalidate()
1670 filename = self._actualfilename(tr)
1677 filename = self._actualfilename(tr)
1671 o = self._opener
1678 o = self._opener
1672 if util.samefile(o.join(backupname), o.join(filename)):
1679 if util.samefile(o.join(backupname), o.join(filename)):
1673 o.unlink(backupname)
1680 o.unlink(backupname)
1674 else:
1681 else:
1675 o.rename(backupname, filename, checkambig=True)
1682 o.rename(backupname, filename, checkambig=True)
1676
1683
1677 def clearbackup(self, tr, backupname):
1684 def clearbackup(self, tr, backupname):
1678 '''Clear backup file'''
1685 '''Clear backup file'''
1679 self._opener.unlink(backupname)
1686 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now