##// END OF EJS Templates
dirstate: properly update `_lastnormaltime` in `update_file_p1`...
marmoute -
r48496:ff481c23 default
parent child Browse files
Show More
@@ -1,1664 +1,1672 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """A filecache whose entries live under the repository's .hg directory."""

    def join(self, obj, fname):
        # Resolve fname through the dirstate's opener, which is rooted at
        # .hg/, so plain filenames map to .hg/<fname>.
        opener = obj._opener
        return opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """A filecache whose entries live in the working-directory root."""

    def join(self, obj, fname):
        # Delegate to the dirstate's fast root-join helper.
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator: assert *func* is only called inside a parentchange context.

    Methods that adjust the dirstate for a new parent must run while
    ``pendingparentchange()`` is true; calling them outside such a context
    would leave the dirstate inconsistent, so we raise a ProgrammingError
    instead.
    """
    # local import so this module's top-level import block is untouched
    from functools import wraps

    @wraps(func)  # preserve __name__/__doc__ of the wrapped method
    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator: assert *func* is never called inside a parentchange context.

    Mirror of ``requires_parents_change``: these methods implement the
    "normal" (non update/merge) code path and must not run while a parent
    change is pending, so we raise a ProgrammingError if they are.
    """
    # local import so this module's top-level import block is untouched
    from functools import wraps

    @wraps(func)  # preserve __name__/__doc__ of the wrapped method
    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state differs from what is on disk
        self._dirty = False
        # mtime of the most recently "normal"-marked file; status() uses it
        # to catch size-preserving edits landing in the same timeslot
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # nesting depth of currently open parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139
139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching the property forces the dirstate map (and thus the
        # parents) to be read now rather than lazily later
        self._pl
146
146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163
163
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        # non-zero while inside one or more parentchange() contexts
        return self._parentwriters > 0
169
169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # assigning to self._map replaces this propertycache entry, so the
        # construction below happens exactly once per dirstate instance
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181
181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
    @property
    def _pl(self):
        # the two parent nodeids, as stored in the dirstate map
        return self._map.parents()
207
207
    def hasdir(self, d):
        # True if d is a directory containing at least one tracked file
        return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
    @propertycache
    def _slash(self):
        # True when paths should be shown with '/' even though the native
        # separator differs (controlled by the ui.slash config knob)
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
    @propertycache
    def _checklink(self):
        # True if the filesystem under the repository root supports symlinks
        return util.checklink(self._root)
227
227
    @propertycache
    def _checkexec(self):
        # True if the filesystem under the repository root honors exec bits
        return bool(util.checkexec(self._root))
231
231
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems, probed via the .hg directory
        return not util.fscasesensitive(self._join(b'.hg'))
235
235
    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240
240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a tracked path to its flags.

        The returned function yields b'l' for a symlink, b'x' for an
        executable file and b'' otherwise.  When the filesystem cannot
        express symlinks and/or exec bits, ``buildfallback()`` supplies a
        function used for the part the filesystem cannot answer.
        """
        if self._checklink and self._checkexec:
            # filesystem supports both symlinks and exec bits: a single
            # lstat() answers everything
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    # file vanished or is unreadable: report no flags
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks are real, exec bit is not: ask the fallback for 'x'
            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit is real, symlinks are not: ask the fallback for 'l'
            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither is supported: everything comes from the fallback
            return fallback
280
280
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
288
288
    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # inside the repo: return the path relative to the root
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. we should
        consider migrating all user of this to going through the dirstate entry
        instead.
        """
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        # untracked files have no entry
        return b'?'
335
335
    def __contains__(self, key):
        # a file is "in" the dirstate if the map has an entry for it
        return key in self._map
338
338
    def __iter__(self):
        # iterate filenames in sorted order for deterministic output
        return iter(sorted(self._map))
341
341
    def items(self):
        """Iterate over (filename, entry) pairs of the dirstate map."""
        return pycompat.iteritems(self._map)

    # Python 2 style alias kept for compatibility with existing callers
    iteritems = items
346
346
    def directories(self):
        # expose the map's directory entries (dirstate-v2 concept)
        return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
    def p1(self):
        # validated first parent
        return self._validate(self._pl[0])
355
355
    def p2(self):
        # validated second parent (nullid when not merging)
        return self._validate(self._pl[1])
358
358
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # a merge is in progress exactly when p2 is set (non-null)
        return self._pl[1] != self._nodeconstants.nullid
363
363
    def branch(self):
        # branch name converted to the local encoding for display
        return encoding.tolocal(self._branch)
366
366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents so a later write can tell
            # whether the parents actually moved
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        # are we dropping from a merge (two parents) down to one?
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414
414
    def setbranch(self, branch):
        """Persist *branch* (local encoding) to .hg/branch atomically."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # atomictemp file: discard the partially written temp file
            f.discard()
            raise
430
430
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the propertycache/filecache entries so the next access reloads
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
    def copies(self):
        # expose the raw dest -> source copy map
        return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of "update/merge" case. For
469 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True the file was previously untracked, False otherwise.
472 return True the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self.normallookup(filename)
479 self.normallookup(filename)
480 return True
480 return True
481 return False
481 return False
482
482
483 @requires_no_parents_change
483 @requires_no_parents_change
484 def set_untracked(self, filename):
484 def set_untracked(self, filename):
485 """a "public" method for generic code to mark a file as untracked
485 """a "public" method for generic code to mark a file as untracked
486
486
487 This function is to be called outside of "update/merge" case. For
487 This function is to be called outside of "update/merge" case. For
488 example by a command like `hg remove X`.
488 example by a command like `hg remove X`.
489
489
490 return True the file was previously tracked, False otherwise.
490 return True the file was previously tracked, False otherwise.
491 """
491 """
492 entry = self._map.get(filename)
492 entry = self._map.get(filename)
493 if entry is None:
493 if entry is None:
494 return False
494 return False
495 elif entry.added:
495 elif entry.added:
496 self._drop(filename)
496 self._drop(filename)
497 return True
497 return True
498 else:
498 else:
499 self._remove(filename)
499 self._remove(filename)
500 return True
500 return True
501
501
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            pass
        else:
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            # (mode, size, mtime) of the on-disk file, used below and by
            # reset_state to record a clean entry
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
555
563
556 @requires_parents_change
564 @requires_parents_change
557 def update_file(
565 def update_file(
558 self,
566 self,
559 filename,
567 filename,
560 wc_tracked,
568 wc_tracked,
561 p1_tracked,
569 p1_tracked,
562 p2_tracked=False,
570 p2_tracked=False,
563 merged=False,
571 merged=False,
564 clean_p1=False,
572 clean_p1=False,
565 clean_p2=False,
573 clean_p2=False,
566 possibly_dirty=False,
574 possibly_dirty=False,
567 parentfiledata=None,
575 parentfiledata=None,
568 ):
576 ):
569 """update the information about a file in the dirstate
577 """update the information about a file in the dirstate
570
578
571 This is to be called when the direstates parent changes to keep track
579 This is to be called when the direstates parent changes to keep track
572 of what is the file situation in regards to the working copy and its parent.
580 of what is the file situation in regards to the working copy and its parent.
573
581
574 This function must be called within a `dirstate.parentchange` context.
582 This function must be called within a `dirstate.parentchange` context.
575
583
576 note: the API is at an early stage and we might need to ajust it
584 note: the API is at an early stage and we might need to ajust it
577 depending of what information ends up being relevant and useful to
585 depending of what information ends up being relevant and useful to
578 other processing.
586 other processing.
579 """
587 """
580 if merged and (clean_p1 or clean_p2):
588 if merged and (clean_p1 or clean_p2):
581 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
589 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
582 raise error.ProgrammingError(msg)
590 raise error.ProgrammingError(msg)
583
591
584 # note: I do not think we need to double check name clash here since we
592 # note: I do not think we need to double check name clash here since we
585 # are in a update/merge case that should already have taken care of
593 # are in a update/merge case that should already have taken care of
586 # this. The test agrees
594 # this. The test agrees
587
595
588 self._dirty = True
596 self._dirty = True
589 self._updatedfiles.add(filename)
597 self._updatedfiles.add(filename)
590
598
591 need_parent_file_data = (
599 need_parent_file_data = (
592 not (possibly_dirty or clean_p2 or merged)
600 not (possibly_dirty or clean_p2 or merged)
593 and wc_tracked
601 and wc_tracked
594 and p1_tracked
602 and p1_tracked
595 )
603 )
596
604
597 # this mean we are doing call for file we do not really care about the
605 # this mean we are doing call for file we do not really care about the
598 # data (eg: added or removed), however this should be a minor overhead
606 # data (eg: added or removed), however this should be a minor overhead
599 # compared to the overall update process calling this.
607 # compared to the overall update process calling this.
600 if need_parent_file_data:
608 if need_parent_file_data:
601 if parentfiledata is None:
609 if parentfiledata is None:
602 parentfiledata = self._get_filedata(filename)
610 parentfiledata = self._get_filedata(filename)
603 mtime = parentfiledata[2]
611 mtime = parentfiledata[2]
604
612
605 if mtime > self._lastnormaltime:
613 if mtime > self._lastnormaltime:
606 # Remember the most recent modification timeslot for
614 # Remember the most recent modification timeslot for
607 # status(), to make sure we won't miss future
615 # status(), to make sure we won't miss future
608 # size-preserving file content modifications that happen
616 # size-preserving file content modifications that happen
609 # within the same timeslot.
617 # within the same timeslot.
610 self._lastnormaltime = mtime
618 self._lastnormaltime = mtime
611
619
612 self._map.reset_state(
620 self._map.reset_state(
613 filename,
621 filename,
614 wc_tracked,
622 wc_tracked,
615 p1_tracked,
623 p1_tracked,
616 p2_tracked=p2_tracked,
624 p2_tracked=p2_tracked,
617 merged=merged,
625 merged=merged,
618 clean_p1=clean_p1,
626 clean_p1=clean_p1,
619 clean_p2=clean_p2,
627 clean_p2=clean_p2,
620 possibly_dirty=possibly_dirty,
628 possibly_dirty=possibly_dirty,
621 parentfiledata=parentfiledata,
629 parentfiledata=parentfiledata,
622 )
630 )
623 if (
631 if (
624 parentfiledata is not None
632 parentfiledata is not None
625 and parentfiledata[2] > self._lastnormaltime
633 and parentfiledata[2] > self._lastnormaltime
626 ):
634 ):
627 # Remember the most recent modification timeslot for status(),
635 # Remember the most recent modification timeslot for status(),
628 # to make sure we won't miss future size-preserving file content
636 # to make sure we won't miss future size-preserving file content
629 # modifications that happen within the same timeslot.
637 # modifications that happen within the same timeslot.
630 self._lastnormaltime = parentfiledata[2]
638 self._lastnormaltime = parentfiledata[2]
631
639
632 def _addpath(
640 def _addpath(
633 self,
641 self,
634 f,
642 f,
635 mode=0,
643 mode=0,
636 size=None,
644 size=None,
637 mtime=None,
645 mtime=None,
638 added=False,
646 added=False,
639 merged=False,
647 merged=False,
640 from_p2=False,
648 from_p2=False,
641 possibly_dirty=False,
649 possibly_dirty=False,
642 ):
650 ):
643 entry = self._map.get(f)
651 entry = self._map.get(f)
644 if added or entry is not None and entry.removed:
652 if added or entry is not None and entry.removed:
645 scmutil.checkfilename(f)
653 scmutil.checkfilename(f)
646 if self._map.hastrackeddir(f):
654 if self._map.hastrackeddir(f):
647 msg = _(b'directory %r already in dirstate')
655 msg = _(b'directory %r already in dirstate')
648 msg %= pycompat.bytestr(f)
656 msg %= pycompat.bytestr(f)
649 raise error.Abort(msg)
657 raise error.Abort(msg)
650 # shadows
658 # shadows
651 for d in pathutil.finddirs(f):
659 for d in pathutil.finddirs(f):
652 if self._map.hastrackeddir(d):
660 if self._map.hastrackeddir(d):
653 break
661 break
654 entry = self._map.get(d)
662 entry = self._map.get(d)
655 if entry is not None and not entry.removed:
663 if entry is not None and not entry.removed:
656 msg = _(b'file %r in dirstate clashes with %r')
664 msg = _(b'file %r in dirstate clashes with %r')
657 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
665 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
658 raise error.Abort(msg)
666 raise error.Abort(msg)
659 self._dirty = True
667 self._dirty = True
660 self._updatedfiles.add(f)
668 self._updatedfiles.add(f)
661 self._map.addfile(
669 self._map.addfile(
662 f,
670 f,
663 mode=mode,
671 mode=mode,
664 size=size,
672 size=size,
665 mtime=mtime,
673 mtime=mtime,
666 added=added,
674 added=added,
667 merged=merged,
675 merged=merged,
668 from_p2=from_p2,
676 from_p2=from_p2,
669 possibly_dirty=possibly_dirty,
677 possibly_dirty=possibly_dirty,
670 )
678 )
671
679
672 def _get_filedata(self, filename):
680 def _get_filedata(self, filename):
673 """returns"""
681 """returns"""
674 s = os.lstat(self._join(filename))
682 s = os.lstat(self._join(filename))
675 mode = s.st_mode
683 mode = s.st_mode
676 size = s.st_size
684 size = s.st_size
677 mtime = s[stat.ST_MTIME]
685 mtime = s[stat.ST_MTIME]
678 return (mode, size, mtime)
686 return (mode, size, mtime)
679
687
680 def normal(self, f, parentfiledata=None):
688 def normal(self, f, parentfiledata=None):
681 """Mark a file normal and clean.
689 """Mark a file normal and clean.
682
690
683 parentfiledata: (mode, size, mtime) of the clean file
691 parentfiledata: (mode, size, mtime) of the clean file
684
692
685 parentfiledata should be computed from memory (for mode,
693 parentfiledata should be computed from memory (for mode,
686 size), as or close as possible from the point where we
694 size), as or close as possible from the point where we
687 determined the file was clean, to limit the risk of the
695 determined the file was clean, to limit the risk of the
688 file having been changed by an external process between the
696 file having been changed by an external process between the
689 moment where the file was determined to be clean and now."""
697 moment where the file was determined to be clean and now."""
690 if parentfiledata:
698 if parentfiledata:
691 (mode, size, mtime) = parentfiledata
699 (mode, size, mtime) = parentfiledata
692 else:
700 else:
693 (mode, size, mtime) = self._get_filedata(f)
701 (mode, size, mtime) = self._get_filedata(f)
694 self._addpath(f, mode=mode, size=size, mtime=mtime)
702 self._addpath(f, mode=mode, size=size, mtime=mtime)
695 self._map.copymap.pop(f, None)
703 self._map.copymap.pop(f, None)
696 if f in self._map.nonnormalset:
704 if f in self._map.nonnormalset:
697 self._map.nonnormalset.remove(f)
705 self._map.nonnormalset.remove(f)
698 if mtime > self._lastnormaltime:
706 if mtime > self._lastnormaltime:
699 # Remember the most recent modification timeslot for status(),
707 # Remember the most recent modification timeslot for status(),
700 # to make sure we won't miss future size-preserving file content
708 # to make sure we won't miss future size-preserving file content
701 # modifications that happen within the same timeslot.
709 # modifications that happen within the same timeslot.
702 self._lastnormaltime = mtime
710 self._lastnormaltime = mtime
703
711
704 def normallookup(self, f):
712 def normallookup(self, f):
705 '''Mark a file normal, but possibly dirty.'''
713 '''Mark a file normal, but possibly dirty.'''
706 if self.in_merge:
714 if self.in_merge:
707 # if there is a merge going on and the file was either
715 # if there is a merge going on and the file was either
708 # "merged" or coming from other parent (-2) before
716 # "merged" or coming from other parent (-2) before
709 # being removed, restore that state.
717 # being removed, restore that state.
710 entry = self._map.get(f)
718 entry = self._map.get(f)
711 if entry is not None:
719 if entry is not None:
712 # XXX this should probably be dealt with a a lower level
720 # XXX this should probably be dealt with a a lower level
713 # (see `merged_removed` and `from_p2_removed`)
721 # (see `merged_removed` and `from_p2_removed`)
714 if entry.merged_removed or entry.from_p2_removed:
722 if entry.merged_removed or entry.from_p2_removed:
715 source = self._map.copymap.get(f)
723 source = self._map.copymap.get(f)
716 if entry.merged_removed:
724 if entry.merged_removed:
717 self.merge(f)
725 self.merge(f)
718 elif entry.from_p2_removed:
726 elif entry.from_p2_removed:
719 self.otherparent(f)
727 self.otherparent(f)
720 if source is not None:
728 if source is not None:
721 self.copy(source, f)
729 self.copy(source, f)
722 return
730 return
723 elif entry.merged or entry.from_p2:
731 elif entry.merged or entry.from_p2:
724 return
732 return
725 self._addpath(f, possibly_dirty=True)
733 self._addpath(f, possibly_dirty=True)
726 self._map.copymap.pop(f, None)
734 self._map.copymap.pop(f, None)
727
735
728 def otherparent(self, f):
736 def otherparent(self, f):
729 '''Mark as coming from the other parent, always dirty.'''
737 '''Mark as coming from the other parent, always dirty.'''
730 if not self.in_merge:
738 if not self.in_merge:
731 msg = _(b"setting %r to other parent only allowed in merges") % f
739 msg = _(b"setting %r to other parent only allowed in merges") % f
732 raise error.Abort(msg)
740 raise error.Abort(msg)
733 entry = self._map.get(f)
741 entry = self._map.get(f)
734 if entry is not None and entry.tracked:
742 if entry is not None and entry.tracked:
735 # merge-like
743 # merge-like
736 self._addpath(f, merged=True)
744 self._addpath(f, merged=True)
737 else:
745 else:
738 # add-like
746 # add-like
739 self._addpath(f, from_p2=True)
747 self._addpath(f, from_p2=True)
740 self._map.copymap.pop(f, None)
748 self._map.copymap.pop(f, None)
741
749
742 def add(self, f):
750 def add(self, f):
743 '''Mark a file added.'''
751 '''Mark a file added.'''
744 if not self.pendingparentchange():
752 if not self.pendingparentchange():
745 util.nouideprecwarn(
753 util.nouideprecwarn(
746 b"do not use `add` outside of update/merge context."
754 b"do not use `add` outside of update/merge context."
747 b" Use `set_tracked`",
755 b" Use `set_tracked`",
748 b'6.0',
756 b'6.0',
749 stacklevel=2,
757 stacklevel=2,
750 )
758 )
751 self._add(f)
759 self._add(f)
752
760
753 def _add(self, filename):
761 def _add(self, filename):
754 """internal function to mark a file as added"""
762 """internal function to mark a file as added"""
755 self._addpath(filename, added=True)
763 self._addpath(filename, added=True)
756 self._map.copymap.pop(filename, None)
764 self._map.copymap.pop(filename, None)
757
765
758 def remove(self, f):
766 def remove(self, f):
759 '''Mark a file removed'''
767 '''Mark a file removed'''
760 if not self.pendingparentchange():
768 if not self.pendingparentchange():
761 util.nouideprecwarn(
769 util.nouideprecwarn(
762 b"do not use `remove` outside of update/merge context."
770 b"do not use `remove` outside of update/merge context."
763 b" Use `set_untracked`",
771 b" Use `set_untracked`",
764 b'6.0',
772 b'6.0',
765 stacklevel=2,
773 stacklevel=2,
766 )
774 )
767 self._remove(f)
775 self._remove(f)
768
776
769 def _remove(self, filename):
777 def _remove(self, filename):
770 """internal function to mark a file removed"""
778 """internal function to mark a file removed"""
771 self._dirty = True
779 self._dirty = True
772 self._updatedfiles.add(filename)
780 self._updatedfiles.add(filename)
773 self._map.removefile(filename, in_merge=self.in_merge)
781 self._map.removefile(filename, in_merge=self.in_merge)
774
782
775 def merge(self, f):
783 def merge(self, f):
776 '''Mark a file merged.'''
784 '''Mark a file merged.'''
777 if not self.in_merge:
785 if not self.in_merge:
778 return self.normallookup(f)
786 return self.normallookup(f)
779 return self.otherparent(f)
787 return self.otherparent(f)
780
788
781 def drop(self, f):
789 def drop(self, f):
782 '''Drop a file from the dirstate'''
790 '''Drop a file from the dirstate'''
783 if not self.pendingparentchange():
791 if not self.pendingparentchange():
784 util.nouideprecwarn(
792 util.nouideprecwarn(
785 b"do not use `drop` outside of update/merge context."
793 b"do not use `drop` outside of update/merge context."
786 b" Use `set_untracked`",
794 b" Use `set_untracked`",
787 b'6.0',
795 b'6.0',
788 stacklevel=2,
796 stacklevel=2,
789 )
797 )
790 self._drop(f)
798 self._drop(f)
791
799
792 def _drop(self, filename):
800 def _drop(self, filename):
793 """internal function to drop a file from the dirstate"""
801 """internal function to drop a file from the dirstate"""
794 if self._map.dropfile(filename):
802 if self._map.dropfile(filename):
795 self._dirty = True
803 self._dirty = True
796 self._updatedfiles.add(filename)
804 self._updatedfiles.add(filename)
797 self._map.copymap.pop(filename, None)
805 self._map.copymap.pop(filename, None)
798
806
799 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
807 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
800 if exists is None:
808 if exists is None:
801 exists = os.path.lexists(os.path.join(self._root, path))
809 exists = os.path.lexists(os.path.join(self._root, path))
802 if not exists:
810 if not exists:
803 # Maybe a path component exists
811 # Maybe a path component exists
804 if not ignoremissing and b'/' in path:
812 if not ignoremissing and b'/' in path:
805 d, f = path.rsplit(b'/', 1)
813 d, f = path.rsplit(b'/', 1)
806 d = self._normalize(d, False, ignoremissing, None)
814 d = self._normalize(d, False, ignoremissing, None)
807 folded = d + b"/" + f
815 folded = d + b"/" + f
808 else:
816 else:
809 # No path components, preserve original case
817 # No path components, preserve original case
810 folded = path
818 folded = path
811 else:
819 else:
812 # recursively normalize leading directory components
820 # recursively normalize leading directory components
813 # against dirstate
821 # against dirstate
814 if b'/' in normed:
822 if b'/' in normed:
815 d, f = normed.rsplit(b'/', 1)
823 d, f = normed.rsplit(b'/', 1)
816 d = self._normalize(d, False, ignoremissing, True)
824 d = self._normalize(d, False, ignoremissing, True)
817 r = self._root + b"/" + d
825 r = self._root + b"/" + d
818 folded = d + b"/" + util.fspath(f, r)
826 folded = d + b"/" + util.fspath(f, r)
819 else:
827 else:
820 folded = util.fspath(normed, self._root)
828 folded = util.fspath(normed, self._root)
821 storemap[normed] = folded
829 storemap[normed] = folded
822
830
823 return folded
831 return folded
824
832
825 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
833 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
826 normed = util.normcase(path)
834 normed = util.normcase(path)
827 folded = self._map.filefoldmap.get(normed, None)
835 folded = self._map.filefoldmap.get(normed, None)
828 if folded is None:
836 if folded is None:
829 if isknown:
837 if isknown:
830 folded = path
838 folded = path
831 else:
839 else:
832 folded = self._discoverpath(
840 folded = self._discoverpath(
833 path, normed, ignoremissing, exists, self._map.filefoldmap
841 path, normed, ignoremissing, exists, self._map.filefoldmap
834 )
842 )
835 return folded
843 return folded
836
844
837 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
845 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
838 normed = util.normcase(path)
846 normed = util.normcase(path)
839 folded = self._map.filefoldmap.get(normed, None)
847 folded = self._map.filefoldmap.get(normed, None)
840 if folded is None:
848 if folded is None:
841 folded = self._map.dirfoldmap.get(normed, None)
849 folded = self._map.dirfoldmap.get(normed, None)
842 if folded is None:
850 if folded is None:
843 if isknown:
851 if isknown:
844 folded = path
852 folded = path
845 else:
853 else:
846 # store discovered result in dirfoldmap so that future
854 # store discovered result in dirfoldmap so that future
847 # normalizefile calls don't start matching directories
855 # normalizefile calls don't start matching directories
848 folded = self._discoverpath(
856 folded = self._discoverpath(
849 path, normed, ignoremissing, exists, self._map.dirfoldmap
857 path, normed, ignoremissing, exists, self._map.dirfoldmap
850 )
858 )
851 return folded
859 return folded
852
860
853 def normalize(self, path, isknown=False, ignoremissing=False):
861 def normalize(self, path, isknown=False, ignoremissing=False):
854 """
862 """
855 normalize the case of a pathname when on a casefolding filesystem
863 normalize the case of a pathname when on a casefolding filesystem
856
864
857 isknown specifies whether the filename came from walking the
865 isknown specifies whether the filename came from walking the
858 disk, to avoid extra filesystem access.
866 disk, to avoid extra filesystem access.
859
867
860 If ignoremissing is True, missing path are returned
868 If ignoremissing is True, missing path are returned
861 unchanged. Otherwise, we try harder to normalize possibly
869 unchanged. Otherwise, we try harder to normalize possibly
862 existing path components.
870 existing path components.
863
871
864 The normalized case is determined based on the following precedence:
872 The normalized case is determined based on the following precedence:
865
873
866 - version of name already stored in the dirstate
874 - version of name already stored in the dirstate
867 - version of name stored on disk
875 - version of name stored on disk
868 - version provided via command arguments
876 - version provided via command arguments
869 """
877 """
870
878
871 if self._checkcase:
879 if self._checkcase:
872 return self._normalize(path, isknown, ignoremissing)
880 return self._normalize(path, isknown, ignoremissing)
873 return path
881 return path
874
882
875 def clear(self):
883 def clear(self):
876 self._map.clear()
884 self._map.clear()
877 self._lastnormaltime = 0
885 self._lastnormaltime = 0
878 self._updatedfiles.clear()
886 self._updatedfiles.clear()
879 self._dirty = True
887 self._dirty = True
880
888
881 def rebuild(self, parent, allfiles, changedfiles=None):
889 def rebuild(self, parent, allfiles, changedfiles=None):
882 if changedfiles is None:
890 if changedfiles is None:
883 # Rebuild entire dirstate
891 # Rebuild entire dirstate
884 to_lookup = allfiles
892 to_lookup = allfiles
885 to_drop = []
893 to_drop = []
886 lastnormaltime = self._lastnormaltime
894 lastnormaltime = self._lastnormaltime
887 self.clear()
895 self.clear()
888 self._lastnormaltime = lastnormaltime
896 self._lastnormaltime = lastnormaltime
889 elif len(changedfiles) < 10:
897 elif len(changedfiles) < 10:
890 # Avoid turning allfiles into a set, which can be expensive if it's
898 # Avoid turning allfiles into a set, which can be expensive if it's
891 # large.
899 # large.
892 to_lookup = []
900 to_lookup = []
893 to_drop = []
901 to_drop = []
894 for f in changedfiles:
902 for f in changedfiles:
895 if f in allfiles:
903 if f in allfiles:
896 to_lookup.append(f)
904 to_lookup.append(f)
897 else:
905 else:
898 to_drop.append(f)
906 to_drop.append(f)
899 else:
907 else:
900 changedfilesset = set(changedfiles)
908 changedfilesset = set(changedfiles)
901 to_lookup = changedfilesset & set(allfiles)
909 to_lookup = changedfilesset & set(allfiles)
902 to_drop = changedfilesset - to_lookup
910 to_drop = changedfilesset - to_lookup
903
911
904 if self._origpl is None:
912 if self._origpl is None:
905 self._origpl = self._pl
913 self._origpl = self._pl
906 self._map.setparents(parent, self._nodeconstants.nullid)
914 self._map.setparents(parent, self._nodeconstants.nullid)
907
915
908 for f in to_lookup:
916 for f in to_lookup:
909 self.normallookup(f)
917 self.normallookup(f)
910 for f in to_drop:
918 for f in to_drop:
911 self._drop(f)
919 self._drop(f)
912
920
913 self._dirty = True
921 self._dirty = True
914
922
915 def identity(self):
923 def identity(self):
916 """Return identity of dirstate itself to detect changing in storage
924 """Return identity of dirstate itself to detect changing in storage
917
925
918 If identity of previous dirstate is equal to this, writing
926 If identity of previous dirstate is equal to this, writing
919 changes based on the former dirstate out can keep consistency.
927 changes based on the former dirstate out can keep consistency.
920 """
928 """
921 return self._map.identity
929 return self._map.identity
922
930
923 def write(self, tr):
931 def write(self, tr):
924 if not self._dirty:
932 if not self._dirty:
925 return
933 return
926
934
927 filename = self._filename
935 filename = self._filename
928 if tr:
936 if tr:
929 # 'dirstate.write()' is not only for writing in-memory
937 # 'dirstate.write()' is not only for writing in-memory
930 # changes out, but also for dropping ambiguous timestamp.
938 # changes out, but also for dropping ambiguous timestamp.
931 # delayed writing re-raise "ambiguous timestamp issue".
939 # delayed writing re-raise "ambiguous timestamp issue".
932 # See also the wiki page below for detail:
940 # See also the wiki page below for detail:
933 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
941 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
934
942
935 # emulate dropping timestamp in 'parsers.pack_dirstate'
943 # emulate dropping timestamp in 'parsers.pack_dirstate'
936 now = _getfsnow(self._opener)
944 now = _getfsnow(self._opener)
937 self._map.clearambiguoustimes(self._updatedfiles, now)
945 self._map.clearambiguoustimes(self._updatedfiles, now)
938
946
939 # emulate that all 'dirstate.normal' results are written out
947 # emulate that all 'dirstate.normal' results are written out
940 self._lastnormaltime = 0
948 self._lastnormaltime = 0
941 self._updatedfiles.clear()
949 self._updatedfiles.clear()
942
950
943 # delay writing in-memory changes out
951 # delay writing in-memory changes out
944 tr.addfilegenerator(
952 tr.addfilegenerator(
945 b'dirstate',
953 b'dirstate',
946 (self._filename,),
954 (self._filename,),
947 lambda f: self._writedirstate(tr, f),
955 lambda f: self._writedirstate(tr, f),
948 location=b'plain',
956 location=b'plain',
949 )
957 )
950 return
958 return
951
959
952 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
960 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
953 self._writedirstate(tr, st)
961 self._writedirstate(tr, st)
954
962
955 def addparentchangecallback(self, category, callback):
963 def addparentchangecallback(self, category, callback):
956 """add a callback to be called when the wd parents are changed
964 """add a callback to be called when the wd parents are changed
957
965
958 Callback will be called with the following arguments:
966 Callback will be called with the following arguments:
959 dirstate, (oldp1, oldp2), (newp1, newp2)
967 dirstate, (oldp1, oldp2), (newp1, newp2)
960
968
961 Category is a unique identifier to allow overwriting an old callback
969 Category is a unique identifier to allow overwriting an old callback
962 with a newer callback.
970 with a newer callback.
963 """
971 """
964 self._plchangecallbacks[category] = callback
972 self._plchangecallbacks[category] = callback
965
973
966 def _writedirstate(self, tr, st):
974 def _writedirstate(self, tr, st):
967 # notify callbacks about parents change
975 # notify callbacks about parents change
968 if self._origpl is not None and self._origpl != self._pl:
976 if self._origpl is not None and self._origpl != self._pl:
969 for c, callback in sorted(
977 for c, callback in sorted(
970 pycompat.iteritems(self._plchangecallbacks)
978 pycompat.iteritems(self._plchangecallbacks)
971 ):
979 ):
972 callback(self, self._origpl, self._pl)
980 callback(self, self._origpl, self._pl)
973 self._origpl = None
981 self._origpl = None
974 # use the modification time of the newly created temporary file as the
982 # use the modification time of the newly created temporary file as the
975 # filesystem's notion of 'now'
983 # filesystem's notion of 'now'
976 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
984 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
977
985
978 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
986 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
979 # timestamp of each entries in dirstate, because of 'now > mtime'
987 # timestamp of each entries in dirstate, because of 'now > mtime'
980 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
988 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
981 if delaywrite > 0:
989 if delaywrite > 0:
982 # do we have any files to delay for?
990 # do we have any files to delay for?
983 for f, e in pycompat.iteritems(self._map):
991 for f, e in pycompat.iteritems(self._map):
984 if e.need_delay(now):
992 if e.need_delay(now):
985 import time # to avoid useless import
993 import time # to avoid useless import
986
994
987 # rather than sleep n seconds, sleep until the next
995 # rather than sleep n seconds, sleep until the next
988 # multiple of n seconds
996 # multiple of n seconds
989 clock = time.time()
997 clock = time.time()
990 start = int(clock) - (int(clock) % delaywrite)
998 start = int(clock) - (int(clock) % delaywrite)
991 end = start + delaywrite
999 end = start + delaywrite
992 time.sleep(end - clock)
1000 time.sleep(end - clock)
993 now = end # trust our estimate that the end is near now
1001 now = end # trust our estimate that the end is near now
994 break
1002 break
995
1003
996 self._map.write(tr, st, now)
1004 self._map.write(tr, st, now)
997 self._lastnormaltime = 0
1005 self._lastnormaltime = 0
998 self._dirty = False
1006 self._dirty = False
999
1007
1000 def _dirignore(self, f):
1008 def _dirignore(self, f):
1001 if self._ignore(f):
1009 if self._ignore(f):
1002 return True
1010 return True
1003 for p in pathutil.finddirs(f):
1011 for p in pathutil.finddirs(f):
1004 if self._ignore(p):
1012 if self._ignore(p):
1005 return True
1013 return True
1006 return False
1014 return False
1007
1015
1008 def _ignorefiles(self):
1016 def _ignorefiles(self):
1009 files = []
1017 files = []
1010 if os.path.exists(self._join(b'.hgignore')):
1018 if os.path.exists(self._join(b'.hgignore')):
1011 files.append(self._join(b'.hgignore'))
1019 files.append(self._join(b'.hgignore'))
1012 for name, path in self._ui.configitems(b"ui"):
1020 for name, path in self._ui.configitems(b"ui"):
1013 if name == b'ignore' or name.startswith(b'ignore.'):
1021 if name == b'ignore' or name.startswith(b'ignore.'):
1014 # we need to use os.path.join here rather than self._join
1022 # we need to use os.path.join here rather than self._join
1015 # because path is arbitrary and user-specified
1023 # because path is arbitrary and user-specified
1016 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1024 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1017 return files
1025 return files
1018
1026
1019 def _ignorefileandline(self, f):
1027 def _ignorefileandline(self, f):
1020 files = collections.deque(self._ignorefiles())
1028 files = collections.deque(self._ignorefiles())
1021 visited = set()
1029 visited = set()
1022 while files:
1030 while files:
1023 i = files.popleft()
1031 i = files.popleft()
1024 patterns = matchmod.readpatternfile(
1032 patterns = matchmod.readpatternfile(
1025 i, self._ui.warn, sourceinfo=True
1033 i, self._ui.warn, sourceinfo=True
1026 )
1034 )
1027 for pattern, lineno, line in patterns:
1035 for pattern, lineno, line in patterns:
1028 kind, p = matchmod._patsplit(pattern, b'glob')
1036 kind, p = matchmod._patsplit(pattern, b'glob')
1029 if kind == b"subinclude":
1037 if kind == b"subinclude":
1030 if p not in visited:
1038 if p not in visited:
1031 files.append(p)
1039 files.append(p)
1032 continue
1040 continue
1033 m = matchmod.match(
1041 m = matchmod.match(
1034 self._root, b'', [], [pattern], warn=self._ui.warn
1042 self._root, b'', [], [pattern], warn=self._ui.warn
1035 )
1043 )
1036 if m(f):
1044 if m(f):
1037 return (i, lineno, line)
1045 return (i, lineno, line)
1038 visited.add(i)
1046 visited.add(i)
1039 return (None, -1, b"")
1047 return (None, -1, b"")
1040
1048
1041 def _walkexplicit(self, match, subrepos):
1049 def _walkexplicit(self, match, subrepos):
1042 """Get stat data about the files explicitly specified by match.
1050 """Get stat data about the files explicitly specified by match.
1043
1051
1044 Return a triple (results, dirsfound, dirsnotfound).
1052 Return a triple (results, dirsfound, dirsnotfound).
1045 - results is a mapping from filename to stat result. It also contains
1053 - results is a mapping from filename to stat result. It also contains
1046 listings mapping subrepos and .hg to None.
1054 listings mapping subrepos and .hg to None.
1047 - dirsfound is a list of files found to be directories.
1055 - dirsfound is a list of files found to be directories.
1048 - dirsnotfound is a list of files that the dirstate thinks are
1056 - dirsnotfound is a list of files that the dirstate thinks are
1049 directories and that were not found."""
1057 directories and that were not found."""
1050
1058
1051 def badtype(mode):
1059 def badtype(mode):
1052 kind = _(b'unknown')
1060 kind = _(b'unknown')
1053 if stat.S_ISCHR(mode):
1061 if stat.S_ISCHR(mode):
1054 kind = _(b'character device')
1062 kind = _(b'character device')
1055 elif stat.S_ISBLK(mode):
1063 elif stat.S_ISBLK(mode):
1056 kind = _(b'block device')
1064 kind = _(b'block device')
1057 elif stat.S_ISFIFO(mode):
1065 elif stat.S_ISFIFO(mode):
1058 kind = _(b'fifo')
1066 kind = _(b'fifo')
1059 elif stat.S_ISSOCK(mode):
1067 elif stat.S_ISSOCK(mode):
1060 kind = _(b'socket')
1068 kind = _(b'socket')
1061 elif stat.S_ISDIR(mode):
1069 elif stat.S_ISDIR(mode):
1062 kind = _(b'directory')
1070 kind = _(b'directory')
1063 return _(b'unsupported file type (type is %s)') % kind
1071 return _(b'unsupported file type (type is %s)') % kind
1064
1072
1065 badfn = match.bad
1073 badfn = match.bad
1066 dmap = self._map
1074 dmap = self._map
1067 lstat = os.lstat
1075 lstat = os.lstat
1068 getkind = stat.S_IFMT
1076 getkind = stat.S_IFMT
1069 dirkind = stat.S_IFDIR
1077 dirkind = stat.S_IFDIR
1070 regkind = stat.S_IFREG
1078 regkind = stat.S_IFREG
1071 lnkkind = stat.S_IFLNK
1079 lnkkind = stat.S_IFLNK
1072 join = self._join
1080 join = self._join
1073 dirsfound = []
1081 dirsfound = []
1074 foundadd = dirsfound.append
1082 foundadd = dirsfound.append
1075 dirsnotfound = []
1083 dirsnotfound = []
1076 notfoundadd = dirsnotfound.append
1084 notfoundadd = dirsnotfound.append
1077
1085
1078 if not match.isexact() and self._checkcase:
1086 if not match.isexact() and self._checkcase:
1079 normalize = self._normalize
1087 normalize = self._normalize
1080 else:
1088 else:
1081 normalize = None
1089 normalize = None
1082
1090
1083 files = sorted(match.files())
1091 files = sorted(match.files())
1084 subrepos.sort()
1092 subrepos.sort()
1085 i, j = 0, 0
1093 i, j = 0, 0
1086 while i < len(files) and j < len(subrepos):
1094 while i < len(files) and j < len(subrepos):
1087 subpath = subrepos[j] + b"/"
1095 subpath = subrepos[j] + b"/"
1088 if files[i] < subpath:
1096 if files[i] < subpath:
1089 i += 1
1097 i += 1
1090 continue
1098 continue
1091 while i < len(files) and files[i].startswith(subpath):
1099 while i < len(files) and files[i].startswith(subpath):
1092 del files[i]
1100 del files[i]
1093 j += 1
1101 j += 1
1094
1102
1095 if not files or b'' in files:
1103 if not files or b'' in files:
1096 files = [b'']
1104 files = [b'']
1097 # constructing the foldmap is expensive, so don't do it for the
1105 # constructing the foldmap is expensive, so don't do it for the
1098 # common case where files is ['']
1106 # common case where files is ['']
1099 normalize = None
1107 normalize = None
1100 results = dict.fromkeys(subrepos)
1108 results = dict.fromkeys(subrepos)
1101 results[b'.hg'] = None
1109 results[b'.hg'] = None
1102
1110
1103 for ff in files:
1111 for ff in files:
1104 if normalize:
1112 if normalize:
1105 nf = normalize(ff, False, True)
1113 nf = normalize(ff, False, True)
1106 else:
1114 else:
1107 nf = ff
1115 nf = ff
1108 if nf in results:
1116 if nf in results:
1109 continue
1117 continue
1110
1118
1111 try:
1119 try:
1112 st = lstat(join(nf))
1120 st = lstat(join(nf))
1113 kind = getkind(st.st_mode)
1121 kind = getkind(st.st_mode)
1114 if kind == dirkind:
1122 if kind == dirkind:
1115 if nf in dmap:
1123 if nf in dmap:
1116 # file replaced by dir on disk but still in dirstate
1124 # file replaced by dir on disk but still in dirstate
1117 results[nf] = None
1125 results[nf] = None
1118 foundadd((nf, ff))
1126 foundadd((nf, ff))
1119 elif kind == regkind or kind == lnkkind:
1127 elif kind == regkind or kind == lnkkind:
1120 results[nf] = st
1128 results[nf] = st
1121 else:
1129 else:
1122 badfn(ff, badtype(kind))
1130 badfn(ff, badtype(kind))
1123 if nf in dmap:
1131 if nf in dmap:
1124 results[nf] = None
1132 results[nf] = None
1125 except OSError as inst: # nf not found on disk - it is dirstate only
1133 except OSError as inst: # nf not found on disk - it is dirstate only
1126 if nf in dmap: # does it exactly match a missing file?
1134 if nf in dmap: # does it exactly match a missing file?
1127 results[nf] = None
1135 results[nf] = None
1128 else: # does it match a missing directory?
1136 else: # does it match a missing directory?
1129 if self._map.hasdir(nf):
1137 if self._map.hasdir(nf):
1130 notfoundadd(nf)
1138 notfoundadd(nf)
1131 else:
1139 else:
1132 badfn(ff, encoding.strtolocal(inst.strerror))
1140 badfn(ff, encoding.strtolocal(inst.strerror))
1133
1141
1134 # match.files() may contain explicitly-specified paths that shouldn't
1142 # match.files() may contain explicitly-specified paths that shouldn't
1135 # be taken; drop them from the list of files found. dirsfound/notfound
1143 # be taken; drop them from the list of files found. dirsfound/notfound
1136 # aren't filtered here because they will be tested later.
1144 # aren't filtered here because they will be tested later.
1137 if match.anypats():
1145 if match.anypats():
1138 for f in list(results):
1146 for f in list(results):
1139 if f == b'.hg' or f in subrepos:
1147 if f == b'.hg' or f in subrepos:
1140 # keep sentinel to disable further out-of-repo walks
1148 # keep sentinel to disable further out-of-repo walks
1141 continue
1149 continue
1142 if not match(f):
1150 if not match(f):
1143 del results[f]
1151 del results[f]
1144
1152
1145 # Case insensitive filesystems cannot rely on lstat() failing to detect
1153 # Case insensitive filesystems cannot rely on lstat() failing to detect
1146 # a case-only rename. Prune the stat object for any file that does not
1154 # a case-only rename. Prune the stat object for any file that does not
1147 # match the case in the filesystem, if there are multiple files that
1155 # match the case in the filesystem, if there are multiple files that
1148 # normalize to the same path.
1156 # normalize to the same path.
1149 if match.isexact() and self._checkcase:
1157 if match.isexact() and self._checkcase:
1150 normed = {}
1158 normed = {}
1151
1159
1152 for f, st in pycompat.iteritems(results):
1160 for f, st in pycompat.iteritems(results):
1153 if st is None:
1161 if st is None:
1154 continue
1162 continue
1155
1163
1156 nc = util.normcase(f)
1164 nc = util.normcase(f)
1157 paths = normed.get(nc)
1165 paths = normed.get(nc)
1158
1166
1159 if paths is None:
1167 if paths is None:
1160 paths = set()
1168 paths = set()
1161 normed[nc] = paths
1169 normed[nc] = paths
1162
1170
1163 paths.add(f)
1171 paths.add(f)
1164
1172
1165 for norm, paths in pycompat.iteritems(normed):
1173 for norm, paths in pycompat.iteritems(normed):
1166 if len(paths) > 1:
1174 if len(paths) > 1:
1167 for path in paths:
1175 for path in paths:
1168 folded = self._discoverpath(
1176 folded = self._discoverpath(
1169 path, norm, True, None, self._map.dirfoldmap
1177 path, norm, True, None, self._map.dirfoldmap
1170 )
1178 )
1171 if path != folded:
1179 if path != folded:
1172 results[path] = None
1180 results[path] = None
1173
1181
1174 return results, dirsfound, dirsnotfound
1182 return results, dirsfound, dirsnotfound
1175
1183
1176 def walk(self, match, subrepos, unknown, ignored, full=True):
1184 def walk(self, match, subrepos, unknown, ignored, full=True):
1177 """
1185 """
1178 Walk recursively through the directory tree, finding all files
1186 Walk recursively through the directory tree, finding all files
1179 matched by match.
1187 matched by match.
1180
1188
1181 If full is False, maybe skip some known-clean files.
1189 If full is False, maybe skip some known-clean files.
1182
1190
1183 Return a dict mapping filename to stat-like object (either
1191 Return a dict mapping filename to stat-like object (either
1184 mercurial.osutil.stat instance or return value of os.stat()).
1192 mercurial.osutil.stat instance or return value of os.stat()).
1185
1193
1186 """
1194 """
1187 # full is a flag that extensions that hook into walk can use -- this
1195 # full is a flag that extensions that hook into walk can use -- this
1188 # implementation doesn't use it at all. This satisfies the contract
1196 # implementation doesn't use it at all. This satisfies the contract
1189 # because we only guarantee a "maybe".
1197 # because we only guarantee a "maybe".
1190
1198
1191 if ignored:
1199 if ignored:
1192 ignore = util.never
1200 ignore = util.never
1193 dirignore = util.never
1201 dirignore = util.never
1194 elif unknown:
1202 elif unknown:
1195 ignore = self._ignore
1203 ignore = self._ignore
1196 dirignore = self._dirignore
1204 dirignore = self._dirignore
1197 else:
1205 else:
1198 # if not unknown and not ignored, drop dir recursion and step 2
1206 # if not unknown and not ignored, drop dir recursion and step 2
1199 ignore = util.always
1207 ignore = util.always
1200 dirignore = util.always
1208 dirignore = util.always
1201
1209
1202 matchfn = match.matchfn
1210 matchfn = match.matchfn
1203 matchalways = match.always()
1211 matchalways = match.always()
1204 matchtdir = match.traversedir
1212 matchtdir = match.traversedir
1205 dmap = self._map
1213 dmap = self._map
1206 listdir = util.listdir
1214 listdir = util.listdir
1207 lstat = os.lstat
1215 lstat = os.lstat
1208 dirkind = stat.S_IFDIR
1216 dirkind = stat.S_IFDIR
1209 regkind = stat.S_IFREG
1217 regkind = stat.S_IFREG
1210 lnkkind = stat.S_IFLNK
1218 lnkkind = stat.S_IFLNK
1211 join = self._join
1219 join = self._join
1212
1220
1213 exact = skipstep3 = False
1221 exact = skipstep3 = False
1214 if match.isexact(): # match.exact
1222 if match.isexact(): # match.exact
1215 exact = True
1223 exact = True
1216 dirignore = util.always # skip step 2
1224 dirignore = util.always # skip step 2
1217 elif match.prefix(): # match.match, no patterns
1225 elif match.prefix(): # match.match, no patterns
1218 skipstep3 = True
1226 skipstep3 = True
1219
1227
1220 if not exact and self._checkcase:
1228 if not exact and self._checkcase:
1221 normalize = self._normalize
1229 normalize = self._normalize
1222 normalizefile = self._normalizefile
1230 normalizefile = self._normalizefile
1223 skipstep3 = False
1231 skipstep3 = False
1224 else:
1232 else:
1225 normalize = self._normalize
1233 normalize = self._normalize
1226 normalizefile = None
1234 normalizefile = None
1227
1235
1228 # step 1: find all explicit files
1236 # step 1: find all explicit files
1229 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1237 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1230 if matchtdir:
1238 if matchtdir:
1231 for d in work:
1239 for d in work:
1232 matchtdir(d[0])
1240 matchtdir(d[0])
1233 for d in dirsnotfound:
1241 for d in dirsnotfound:
1234 matchtdir(d)
1242 matchtdir(d)
1235
1243
1236 skipstep3 = skipstep3 and not (work or dirsnotfound)
1244 skipstep3 = skipstep3 and not (work or dirsnotfound)
1237 work = [d for d in work if not dirignore(d[0])]
1245 work = [d for d in work if not dirignore(d[0])]
1238
1246
1239 # step 2: visit subdirectories
1247 # step 2: visit subdirectories
1240 def traverse(work, alreadynormed):
1248 def traverse(work, alreadynormed):
1241 wadd = work.append
1249 wadd = work.append
1242 while work:
1250 while work:
1243 tracing.counter('dirstate.walk work', len(work))
1251 tracing.counter('dirstate.walk work', len(work))
1244 nd = work.pop()
1252 nd = work.pop()
1245 visitentries = match.visitchildrenset(nd)
1253 visitentries = match.visitchildrenset(nd)
1246 if not visitentries:
1254 if not visitentries:
1247 continue
1255 continue
1248 if visitentries == b'this' or visitentries == b'all':
1256 if visitentries == b'this' or visitentries == b'all':
1249 visitentries = None
1257 visitentries = None
1250 skip = None
1258 skip = None
1251 if nd != b'':
1259 if nd != b'':
1252 skip = b'.hg'
1260 skip = b'.hg'
1253 try:
1261 try:
1254 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1262 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1255 entries = listdir(join(nd), stat=True, skip=skip)
1263 entries = listdir(join(nd), stat=True, skip=skip)
1256 except OSError as inst:
1264 except OSError as inst:
1257 if inst.errno in (errno.EACCES, errno.ENOENT):
1265 if inst.errno in (errno.EACCES, errno.ENOENT):
1258 match.bad(
1266 match.bad(
1259 self.pathto(nd), encoding.strtolocal(inst.strerror)
1267 self.pathto(nd), encoding.strtolocal(inst.strerror)
1260 )
1268 )
1261 continue
1269 continue
1262 raise
1270 raise
1263 for f, kind, st in entries:
1271 for f, kind, st in entries:
1264 # Some matchers may return files in the visitentries set,
1272 # Some matchers may return files in the visitentries set,
1265 # instead of 'this', if the matcher explicitly mentions them
1273 # instead of 'this', if the matcher explicitly mentions them
1266 # and is not an exactmatcher. This is acceptable; we do not
1274 # and is not an exactmatcher. This is acceptable; we do not
1267 # make any hard assumptions about file-or-directory below
1275 # make any hard assumptions about file-or-directory below
1268 # based on the presence of `f` in visitentries. If
1276 # based on the presence of `f` in visitentries. If
1269 # visitchildrenset returned a set, we can always skip the
1277 # visitchildrenset returned a set, we can always skip the
1270 # entries *not* in the set it provided regardless of whether
1278 # entries *not* in the set it provided regardless of whether
1271 # they're actually a file or a directory.
1279 # they're actually a file or a directory.
1272 if visitentries and f not in visitentries:
1280 if visitentries and f not in visitentries:
1273 continue
1281 continue
1274 if normalizefile:
1282 if normalizefile:
1275 # even though f might be a directory, we're only
1283 # even though f might be a directory, we're only
1276 # interested in comparing it to files currently in the
1284 # interested in comparing it to files currently in the
1277 # dmap -- therefore normalizefile is enough
1285 # dmap -- therefore normalizefile is enough
1278 nf = normalizefile(
1286 nf = normalizefile(
1279 nd and (nd + b"/" + f) or f, True, True
1287 nd and (nd + b"/" + f) or f, True, True
1280 )
1288 )
1281 else:
1289 else:
1282 nf = nd and (nd + b"/" + f) or f
1290 nf = nd and (nd + b"/" + f) or f
1283 if nf not in results:
1291 if nf not in results:
1284 if kind == dirkind:
1292 if kind == dirkind:
1285 if not ignore(nf):
1293 if not ignore(nf):
1286 if matchtdir:
1294 if matchtdir:
1287 matchtdir(nf)
1295 matchtdir(nf)
1288 wadd(nf)
1296 wadd(nf)
1289 if nf in dmap and (matchalways or matchfn(nf)):
1297 if nf in dmap and (matchalways or matchfn(nf)):
1290 results[nf] = None
1298 results[nf] = None
1291 elif kind == regkind or kind == lnkkind:
1299 elif kind == regkind or kind == lnkkind:
1292 if nf in dmap:
1300 if nf in dmap:
1293 if matchalways or matchfn(nf):
1301 if matchalways or matchfn(nf):
1294 results[nf] = st
1302 results[nf] = st
1295 elif (matchalways or matchfn(nf)) and not ignore(
1303 elif (matchalways or matchfn(nf)) and not ignore(
1296 nf
1304 nf
1297 ):
1305 ):
1298 # unknown file -- normalize if necessary
1306 # unknown file -- normalize if necessary
1299 if not alreadynormed:
1307 if not alreadynormed:
1300 nf = normalize(nf, False, True)
1308 nf = normalize(nf, False, True)
1301 results[nf] = st
1309 results[nf] = st
1302 elif nf in dmap and (matchalways or matchfn(nf)):
1310 elif nf in dmap and (matchalways or matchfn(nf)):
1303 results[nf] = None
1311 results[nf] = None
1304
1312
1305 for nd, d in work:
1313 for nd, d in work:
1306 # alreadynormed means that processwork doesn't have to do any
1314 # alreadynormed means that processwork doesn't have to do any
1307 # expensive directory normalization
1315 # expensive directory normalization
1308 alreadynormed = not normalize or nd == d
1316 alreadynormed = not normalize or nd == d
1309 traverse([d], alreadynormed)
1317 traverse([d], alreadynormed)
1310
1318
1311 for s in subrepos:
1319 for s in subrepos:
1312 del results[s]
1320 del results[s]
1313 del results[b'.hg']
1321 del results[b'.hg']
1314
1322
1315 # step 3: visit remaining files from dmap
1323 # step 3: visit remaining files from dmap
1316 if not skipstep3 and not exact:
1324 if not skipstep3 and not exact:
1317 # If a dmap file is not in results yet, it was either
1325 # If a dmap file is not in results yet, it was either
1318 # a) not matching matchfn b) ignored, c) missing, or d) under a
1326 # a) not matching matchfn b) ignored, c) missing, or d) under a
1319 # symlink directory.
1327 # symlink directory.
1320 if not results and matchalways:
1328 if not results and matchalways:
1321 visit = [f for f in dmap]
1329 visit = [f for f in dmap]
1322 else:
1330 else:
1323 visit = [f for f in dmap if f not in results and matchfn(f)]
1331 visit = [f for f in dmap if f not in results and matchfn(f)]
1324 visit.sort()
1332 visit.sort()
1325
1333
1326 if unknown:
1334 if unknown:
1327 # unknown == True means we walked all dirs under the roots
1335 # unknown == True means we walked all dirs under the roots
1328 # that wasn't ignored, and everything that matched was stat'ed
1336 # that wasn't ignored, and everything that matched was stat'ed
1329 # and is already in results.
1337 # and is already in results.
1330 # The rest must thus be ignored or under a symlink.
1338 # The rest must thus be ignored or under a symlink.
1331 audit_path = pathutil.pathauditor(self._root, cached=True)
1339 audit_path = pathutil.pathauditor(self._root, cached=True)
1332
1340
1333 for nf in iter(visit):
1341 for nf in iter(visit):
1334 # If a stat for the same file was already added with a
1342 # If a stat for the same file was already added with a
1335 # different case, don't add one for this, since that would
1343 # different case, don't add one for this, since that would
1336 # make it appear as if the file exists under both names
1344 # make it appear as if the file exists under both names
1337 # on disk.
1345 # on disk.
1338 if (
1346 if (
1339 normalizefile
1347 normalizefile
1340 and normalizefile(nf, True, True) in results
1348 and normalizefile(nf, True, True) in results
1341 ):
1349 ):
1342 results[nf] = None
1350 results[nf] = None
1343 # Report ignored items in the dmap as long as they are not
1351 # Report ignored items in the dmap as long as they are not
1344 # under a symlink directory.
1352 # under a symlink directory.
1345 elif audit_path.check(nf):
1353 elif audit_path.check(nf):
1346 try:
1354 try:
1347 results[nf] = lstat(join(nf))
1355 results[nf] = lstat(join(nf))
1348 # file was just ignored, no links, and exists
1356 # file was just ignored, no links, and exists
1349 except OSError:
1357 except OSError:
1350 # file doesn't exist
1358 # file doesn't exist
1351 results[nf] = None
1359 results[nf] = None
1352 else:
1360 else:
1353 # It's either missing or under a symlink directory
1361 # It's either missing or under a symlink directory
1354 # which we in this case report as missing
1362 # which we in this case report as missing
1355 results[nf] = None
1363 results[nf] = None
1356 else:
1364 else:
1357 # We may not have walked the full directory tree above,
1365 # We may not have walked the full directory tree above,
1358 # so stat and check everything we missed.
1366 # so stat and check everything we missed.
1359 iv = iter(visit)
1367 iv = iter(visit)
1360 for st in util.statfiles([join(i) for i in visit]):
1368 for st in util.statfiles([join(i) for i in visit]):
1361 results[next(iv)] = st
1369 results[next(iv)] = st
1362 return results
1370 return results
1363
1371
1364 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1372 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1365 # Force Rayon (Rust parallelism library) to respect the number of
1373 # Force Rayon (Rust parallelism library) to respect the number of
1366 # workers. This is a temporary workaround until Rust code knows
1374 # workers. This is a temporary workaround until Rust code knows
1367 # how to read the config file.
1375 # how to read the config file.
1368 numcpus = self._ui.configint(b"worker", b"numcpus")
1376 numcpus = self._ui.configint(b"worker", b"numcpus")
1369 if numcpus is not None:
1377 if numcpus is not None:
1370 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1378 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1371
1379
1372 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1380 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1373 if not workers_enabled:
1381 if not workers_enabled:
1374 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1382 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1375
1383
1376 (
1384 (
1377 lookup,
1385 lookup,
1378 modified,
1386 modified,
1379 added,
1387 added,
1380 removed,
1388 removed,
1381 deleted,
1389 deleted,
1382 clean,
1390 clean,
1383 ignored,
1391 ignored,
1384 unknown,
1392 unknown,
1385 warnings,
1393 warnings,
1386 bad,
1394 bad,
1387 traversed,
1395 traversed,
1388 dirty,
1396 dirty,
1389 ) = rustmod.status(
1397 ) = rustmod.status(
1390 self._map._rustmap,
1398 self._map._rustmap,
1391 matcher,
1399 matcher,
1392 self._rootdir,
1400 self._rootdir,
1393 self._ignorefiles(),
1401 self._ignorefiles(),
1394 self._checkexec,
1402 self._checkexec,
1395 self._lastnormaltime,
1403 self._lastnormaltime,
1396 bool(list_clean),
1404 bool(list_clean),
1397 bool(list_ignored),
1405 bool(list_ignored),
1398 bool(list_unknown),
1406 bool(list_unknown),
1399 bool(matcher.traversedir),
1407 bool(matcher.traversedir),
1400 )
1408 )
1401
1409
1402 self._dirty |= dirty
1410 self._dirty |= dirty
1403
1411
1404 if matcher.traversedir:
1412 if matcher.traversedir:
1405 for dir in traversed:
1413 for dir in traversed:
1406 matcher.traversedir(dir)
1414 matcher.traversedir(dir)
1407
1415
1408 if self._ui.warn:
1416 if self._ui.warn:
1409 for item in warnings:
1417 for item in warnings:
1410 if isinstance(item, tuple):
1418 if isinstance(item, tuple):
1411 file_path, syntax = item
1419 file_path, syntax = item
1412 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1420 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1413 file_path,
1421 file_path,
1414 syntax,
1422 syntax,
1415 )
1423 )
1416 self._ui.warn(msg)
1424 self._ui.warn(msg)
1417 else:
1425 else:
1418 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1426 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1419 self._ui.warn(
1427 self._ui.warn(
1420 msg
1428 msg
1421 % (
1429 % (
1422 pathutil.canonpath(
1430 pathutil.canonpath(
1423 self._rootdir, self._rootdir, item
1431 self._rootdir, self._rootdir, item
1424 ),
1432 ),
1425 b"No such file or directory",
1433 b"No such file or directory",
1426 )
1434 )
1427 )
1435 )
1428
1436
1429 for (fn, message) in bad:
1437 for (fn, message) in bad:
1430 matcher.bad(fn, encoding.strtolocal(message))
1438 matcher.bad(fn, encoding.strtolocal(message))
1431
1439
1432 status = scmutil.status(
1440 status = scmutil.status(
1433 modified=modified,
1441 modified=modified,
1434 added=added,
1442 added=added,
1435 removed=removed,
1443 removed=removed,
1436 deleted=deleted,
1444 deleted=deleted,
1437 unknown=unknown,
1445 unknown=unknown,
1438 ignored=ignored,
1446 ignored=ignored,
1439 clean=clean,
1447 clean=clean,
1440 )
1448 )
1441 return (lookup, status)
1449 return (lookup, status)
1442
1450
1443 def status(self, match, subrepos, ignored, clean, unknown):
1451 def status(self, match, subrepos, ignored, clean, unknown):
1444 """Determine the status of the working copy relative to the
1452 """Determine the status of the working copy relative to the
1445 dirstate and return a pair of (unsure, status), where status is of type
1453 dirstate and return a pair of (unsure, status), where status is of type
1446 scmutil.status and:
1454 scmutil.status and:
1447
1455
1448 unsure:
1456 unsure:
1449 files that might have been modified since the dirstate was
1457 files that might have been modified since the dirstate was
1450 written, but need to be read to be sure (size is the same
1458 written, but need to be read to be sure (size is the same
1451 but mtime differs)
1459 but mtime differs)
1452 status.modified:
1460 status.modified:
1453 files that have definitely been modified since the dirstate
1461 files that have definitely been modified since the dirstate
1454 was written (different size or mode)
1462 was written (different size or mode)
1455 status.clean:
1463 status.clean:
1456 files that have definitely not been modified since the
1464 files that have definitely not been modified since the
1457 dirstate was written
1465 dirstate was written
1458 """
1466 """
1459 listignored, listclean, listunknown = ignored, clean, unknown
1467 listignored, listclean, listunknown = ignored, clean, unknown
1460 lookup, modified, added, unknown, ignored = [], [], [], [], []
1468 lookup, modified, added, unknown, ignored = [], [], [], [], []
1461 removed, deleted, clean = [], [], []
1469 removed, deleted, clean = [], [], []
1462
1470
1463 dmap = self._map
1471 dmap = self._map
1464 dmap.preload()
1472 dmap.preload()
1465
1473
1466 use_rust = True
1474 use_rust = True
1467
1475
1468 allowed_matchers = (
1476 allowed_matchers = (
1469 matchmod.alwaysmatcher,
1477 matchmod.alwaysmatcher,
1470 matchmod.exactmatcher,
1478 matchmod.exactmatcher,
1471 matchmod.includematcher,
1479 matchmod.includematcher,
1472 )
1480 )
1473
1481
1474 if rustmod is None:
1482 if rustmod is None:
1475 use_rust = False
1483 use_rust = False
1476 elif self._checkcase:
1484 elif self._checkcase:
1477 # Case-insensitive filesystems are not handled yet
1485 # Case-insensitive filesystems are not handled yet
1478 use_rust = False
1486 use_rust = False
1479 elif subrepos:
1487 elif subrepos:
1480 use_rust = False
1488 use_rust = False
1481 elif sparse.enabled:
1489 elif sparse.enabled:
1482 use_rust = False
1490 use_rust = False
1483 elif not isinstance(match, allowed_matchers):
1491 elif not isinstance(match, allowed_matchers):
1484 # Some matchers have yet to be implemented
1492 # Some matchers have yet to be implemented
1485 use_rust = False
1493 use_rust = False
1486
1494
1487 if use_rust:
1495 if use_rust:
1488 try:
1496 try:
1489 return self._rust_status(
1497 return self._rust_status(
1490 match, listclean, listignored, listunknown
1498 match, listclean, listignored, listunknown
1491 )
1499 )
1492 except rustmod.FallbackError:
1500 except rustmod.FallbackError:
1493 pass
1501 pass
1494
1502
1495 def noop(f):
1503 def noop(f):
1496 pass
1504 pass
1497
1505
1498 dcontains = dmap.__contains__
1506 dcontains = dmap.__contains__
1499 dget = dmap.__getitem__
1507 dget = dmap.__getitem__
1500 ladd = lookup.append # aka "unsure"
1508 ladd = lookup.append # aka "unsure"
1501 madd = modified.append
1509 madd = modified.append
1502 aadd = added.append
1510 aadd = added.append
1503 uadd = unknown.append if listunknown else noop
1511 uadd = unknown.append if listunknown else noop
1504 iadd = ignored.append if listignored else noop
1512 iadd = ignored.append if listignored else noop
1505 radd = removed.append
1513 radd = removed.append
1506 dadd = deleted.append
1514 dadd = deleted.append
1507 cadd = clean.append if listclean else noop
1515 cadd = clean.append if listclean else noop
1508 mexact = match.exact
1516 mexact = match.exact
1509 dirignore = self._dirignore
1517 dirignore = self._dirignore
1510 checkexec = self._checkexec
1518 checkexec = self._checkexec
1511 copymap = self._map.copymap
1519 copymap = self._map.copymap
1512 lastnormaltime = self._lastnormaltime
1520 lastnormaltime = self._lastnormaltime
1513
1521
1514 # We need to do full walks when either
1522 # We need to do full walks when either
1515 # - we're listing all clean files, or
1523 # - we're listing all clean files, or
1516 # - match.traversedir does something, because match.traversedir should
1524 # - match.traversedir does something, because match.traversedir should
1517 # be called for every dir in the working dir
1525 # be called for every dir in the working dir
1518 full = listclean or match.traversedir is not None
1526 full = listclean or match.traversedir is not None
1519 for fn, st in pycompat.iteritems(
1527 for fn, st in pycompat.iteritems(
1520 self.walk(match, subrepos, listunknown, listignored, full=full)
1528 self.walk(match, subrepos, listunknown, listignored, full=full)
1521 ):
1529 ):
1522 if not dcontains(fn):
1530 if not dcontains(fn):
1523 if (listignored or mexact(fn)) and dirignore(fn):
1531 if (listignored or mexact(fn)) and dirignore(fn):
1524 if listignored:
1532 if listignored:
1525 iadd(fn)
1533 iadd(fn)
1526 else:
1534 else:
1527 uadd(fn)
1535 uadd(fn)
1528 continue
1536 continue
1529
1537
1530 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1538 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1531 # written like that for performance reasons. dmap[fn] is not a
1539 # written like that for performance reasons. dmap[fn] is not a
1532 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1540 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1533 # opcode has fast paths when the value to be unpacked is a tuple or
1541 # opcode has fast paths when the value to be unpacked is a tuple or
1534 # a list, but falls back to creating a full-fledged iterator in
1542 # a list, but falls back to creating a full-fledged iterator in
1535 # general. That is much slower than simply accessing and storing the
1543 # general. That is much slower than simply accessing and storing the
1536 # tuple members one by one.
1544 # tuple members one by one.
1537 t = dget(fn)
1545 t = dget(fn)
1538 mode = t.mode
1546 mode = t.mode
1539 size = t.size
1547 size = t.size
1540 time = t.mtime
1548 time = t.mtime
1541
1549
1542 if not st and t.tracked:
1550 if not st and t.tracked:
1543 dadd(fn)
1551 dadd(fn)
1544 elif t.merged:
1552 elif t.merged:
1545 madd(fn)
1553 madd(fn)
1546 elif t.added:
1554 elif t.added:
1547 aadd(fn)
1555 aadd(fn)
1548 elif t.removed:
1556 elif t.removed:
1549 radd(fn)
1557 radd(fn)
1550 elif t.tracked:
1558 elif t.tracked:
1551 if (
1559 if (
1552 size >= 0
1560 size >= 0
1553 and (
1561 and (
1554 (size != st.st_size and size != st.st_size & _rangemask)
1562 (size != st.st_size and size != st.st_size & _rangemask)
1555 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1563 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1556 )
1564 )
1557 or t.from_p2
1565 or t.from_p2
1558 or fn in copymap
1566 or fn in copymap
1559 ):
1567 ):
1560 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1568 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1561 # issue6456: Size returned may be longer due to
1569 # issue6456: Size returned may be longer due to
1562 # encryption on EXT-4 fscrypt, undecided.
1570 # encryption on EXT-4 fscrypt, undecided.
1563 ladd(fn)
1571 ladd(fn)
1564 else:
1572 else:
1565 madd(fn)
1573 madd(fn)
1566 elif (
1574 elif (
1567 time != st[stat.ST_MTIME]
1575 time != st[stat.ST_MTIME]
1568 and time != st[stat.ST_MTIME] & _rangemask
1576 and time != st[stat.ST_MTIME] & _rangemask
1569 ):
1577 ):
1570 ladd(fn)
1578 ladd(fn)
1571 elif st[stat.ST_MTIME] == lastnormaltime:
1579 elif st[stat.ST_MTIME] == lastnormaltime:
1572 # fn may have just been marked as normal and it may have
1580 # fn may have just been marked as normal and it may have
1573 # changed in the same second without changing its size.
1581 # changed in the same second without changing its size.
1574 # This can happen if we quickly do multiple commits.
1582 # This can happen if we quickly do multiple commits.
1575 # Force lookup, so we don't miss such a racy file change.
1583 # Force lookup, so we don't miss such a racy file change.
1576 ladd(fn)
1584 ladd(fn)
1577 elif listclean:
1585 elif listclean:
1578 cadd(fn)
1586 cadd(fn)
1579 status = scmutil.status(
1587 status = scmutil.status(
1580 modified, added, removed, deleted, unknown, ignored, clean
1588 modified, added, removed, deleted, unknown, ignored, clean
1581 )
1589 )
1582 return (lookup, status)
1590 return (lookup, status)
1583
1591
1584 def matches(self, match):
1592 def matches(self, match):
1585 """
1593 """
1586 return files in the dirstate (in whatever state) filtered by match
1594 return files in the dirstate (in whatever state) filtered by match
1587 """
1595 """
1588 dmap = self._map
1596 dmap = self._map
1589 if rustmod is not None:
1597 if rustmod is not None:
1590 dmap = self._map._rustmap
1598 dmap = self._map._rustmap
1591
1599
1592 if match.always():
1600 if match.always():
1593 return dmap.keys()
1601 return dmap.keys()
1594 files = match.files()
1602 files = match.files()
1595 if match.isexact():
1603 if match.isexact():
1596 # fast path -- filter the other way around, since typically files is
1604 # fast path -- filter the other way around, since typically files is
1597 # much smaller than dmap
1605 # much smaller than dmap
1598 return [f for f in files if f in dmap]
1606 return [f for f in files if f in dmap]
1599 if match.prefix() and all(fn in dmap for fn in files):
1607 if match.prefix() and all(fn in dmap for fn in files):
1600 # fast path -- all the values are known to be files, so just return
1608 # fast path -- all the values are known to be files, so just return
1601 # that
1609 # that
1602 return list(files)
1610 return list(files)
1603 return [f for f in dmap if match(f)]
1611 return [f for f in dmap if match(f)]
1604
1612
1605 def _actualfilename(self, tr):
1613 def _actualfilename(self, tr):
1606 if tr:
1614 if tr:
1607 return self._pendingfilename
1615 return self._pendingfilename
1608 else:
1616 else:
1609 return self._filename
1617 return self._filename
1610
1618
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        The backup can later be restored with `restorebackup` or discarded
        with `clearbackup`.  When a transaction `tr` is active, the pending
        dirstate file is the one backed up, and the transaction is taught
        to keep that pending file up to date and to clean it up on failure.
        '''
        filename = self._actualfilename(tr)
        # backing a file up onto itself would lose data via the tryunlink
        # below; callers must pick a distinct name
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # remove any stale backup before creating the new one
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1650 def restorebackup(self, tr, backupname):
1658 def restorebackup(self, tr, backupname):
1651 '''Restore dirstate by backup file'''
1659 '''Restore dirstate by backup file'''
1652 # this "invalidate()" prevents "wlock.release()" from writing
1660 # this "invalidate()" prevents "wlock.release()" from writing
1653 # changes of dirstate out after restoring from backup file
1661 # changes of dirstate out after restoring from backup file
1654 self.invalidate()
1662 self.invalidate()
1655 filename = self._actualfilename(tr)
1663 filename = self._actualfilename(tr)
1656 o = self._opener
1664 o = self._opener
1657 if util.samefile(o.join(backupname), o.join(filename)):
1665 if util.samefile(o.join(backupname), o.join(filename)):
1658 o.unlink(backupname)
1666 o.unlink(backupname)
1659 else:
1667 else:
1660 o.rename(backupname, filename, checkambig=True)
1668 o.rename(backupname, filename, checkambig=True)
1661
1669
1662 def clearbackup(self, tr, backupname):
1670 def clearbackup(self, tr, backupname):
1663 '''Clear backup file'''
1671 '''Clear backup file'''
1664 self._opener.unlink(backupname)
1672 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now