##// END OF EJS Templates
dirstate: use `tracked` property in `_addpath`...
marmoute -
r48796:36c0d738 default
parent child Browse files
Show More
@@ -1,1614 +1,1614 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """A filecache variant resolving names relative to the ``.hg`` directory."""

    def join(self, obj, fname):
        # `obj` is the dirstate; its opener is rooted at .hg/
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """A filecache variant resolving names relative to the repository root."""

    def join(self, obj, fname):
        # `obj` is the dirstate; _join prepends the working-directory root
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator enforcing that a dirstate method runs *inside* a
    ``parentchange`` context.

    Raises error.ProgrammingError otherwise.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            raise error.ProgrammingError(
                'calling `%s` outside of a parentchange context'
                % func.__name__
            )
        return func(self, *args, **kwargs)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator enforcing that a dirstate method runs *outside* any
    ``parentchange`` context.

    Raises error.ProgrammingError otherwise.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            raise error.ProgrammingError(
                'calling `%s` inside of a parentchange context'
                % func.__name__
            )
        return func(self, *args, **kwargs)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._updatedfiles = set()
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
242 if self._checklink and self._checkexec:
243
243
244 def f(x):
244 def f(x):
245 try:
245 try:
246 st = os.lstat(self._join(x))
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
247 if util.statislink(st):
248 return b'l'
248 return b'l'
249 if util.statisexec(st):
249 if util.statisexec(st):
250 return b'x'
250 return b'x'
251 except OSError:
251 except OSError:
252 pass
252 pass
253 return b''
253 return b''
254
254
255 return f
255 return f
256
256
257 fallback = buildfallback()
257 fallback = buildfallback()
258 if self._checklink:
258 if self._checklink:
259
259
260 def f(x):
260 def f(x):
261 if os.path.islink(self._join(x)):
261 if os.path.islink(self._join(x)):
262 return b'l'
262 return b'l'
263 if b'x' in fallback(x):
263 if b'x' in fallback(x):
264 return b'x'
264 return b'x'
265 return b''
265 return b''
266
266
267 return f
267 return f
268 if self._checkexec:
268 if self._checkexec:
269
269
270 def f(x):
270 def f(x):
271 if b'l' in fallback(x):
271 if b'l' in fallback(x):
272 return b'l'
272 return b'l'
273 if util.isexec(self._join(x)):
273 if util.isexec(self._join(x)):
274 return b'x'
274 return b'x'
275 return b''
275 return b''
276
276
277 return f
277 return f
278 else:
278 else:
279 return fallback
279 return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries a
370 When moving from two parents to one, "merged" entries a
371 adjusted to normal and previous copy records discarded and
371 adjusted to normal and previous copy records discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 self._map.setparents(p1, p2)
388 self._map.setparents(p1, p2)
389 copies = {}
389 copies = {}
390 if (
390 if (
391 oldp2 != self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
395
396 for f in candidatefiles:
396 for f in candidatefiles:
397 s = self._map.get(f)
397 s = self._map.get(f)
398 if s is None:
398 if s is None:
399 continue
399 continue
400
400
401 # Discard "merged" markers when moving away from a merge state
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
402 if s.merged:
403 source = self._map.copymap.get(f)
403 source = self._map.copymap.get(f)
404 if source:
404 if source:
405 copies[f] = source
405 copies[f] = source
406 self._normallookup(f)
406 self._normallookup(f)
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._add(f)
413 return copies
413 return copies
414
414
415 def setbranch(self, branch):
415 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 try:
418 try:
419 f.write(self._branch + b'\n')
419 f.write(self._branch + b'\n')
420 f.close()
420 f.close()
421
421
422 # make sure filecache has the correct stat info for _branch after
422 # make sure filecache has the correct stat info for _branch after
423 # replacing the underlying file
423 # replacing the underlying file
424 ce = self._filecache[b'_branch']
424 ce = self._filecache[b'_branch']
425 if ce:
425 if ce:
426 ce.refresh()
426 ce.refresh()
427 except: # re-raises
427 except: # re-raises
428 f.discard()
428 f.discard()
429 raise
429 raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of "update/merge" case. For
469 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True the file was previously untracked, False otherwise.
472 return True the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self._normallookup(filename)
479 self._normallookup(filename)
480 return True
480 return True
481 # XXX This is probably overkill for more case, but we need this to
481 # XXX This is probably overkill for more case, but we need this to
482 # fully replace the `normallookup` call with `set_tracked` one.
482 # fully replace the `normallookup` call with `set_tracked` one.
483 # Consider smoothing this in the future.
483 # Consider smoothing this in the future.
484 self.set_possibly_dirty(filename)
484 self.set_possibly_dirty(filename)
485 return False
485 return False
486
486
487 @requires_no_parents_change
487 @requires_no_parents_change
488 def set_untracked(self, filename):
488 def set_untracked(self, filename):
489 """a "public" method for generic code to mark a file as untracked
489 """a "public" method for generic code to mark a file as untracked
490
490
491 This function is to be called outside of "update/merge" case. For
491 This function is to be called outside of "update/merge" case. For
492 example by a command like `hg remove X`.
492 example by a command like `hg remove X`.
493
493
494 return True the file was previously tracked, False otherwise.
494 return True the file was previously tracked, False otherwise.
495 """
495 """
496 ret = self._map.set_untracked(filename)
496 ret = self._map.set_untracked(filename)
497 if ret:
497 if ret:
498 self._dirty = True
498 self._dirty = True
499 self._updatedfiles.add(filename)
499 self._updatedfiles.add(filename)
500 return ret
500 return ret
501
501
502 @requires_no_parents_change
502 @requires_no_parents_change
503 def set_clean(self, filename, parentfiledata=None):
503 def set_clean(self, filename, parentfiledata=None):
504 """record that the current state of the file on disk is known to be clean"""
504 """record that the current state of the file on disk is known to be clean"""
505 self._dirty = True
505 self._dirty = True
506 self._updatedfiles.add(filename)
506 self._updatedfiles.add(filename)
507 if parentfiledata:
507 if parentfiledata:
508 (mode, size, mtime) = parentfiledata
508 (mode, size, mtime) = parentfiledata
509 else:
509 else:
510 (mode, size, mtime) = self._get_filedata(filename)
510 (mode, size, mtime) = self._get_filedata(filename)
511 if not self._map[filename].tracked:
511 if not self._map[filename].tracked:
512 self._check_new_tracked_filename(filename)
512 self._check_new_tracked_filename(filename)
513 self._map.set_clean(filename, mode, size, mtime)
513 self._map.set_clean(filename, mode, size, mtime)
514 if mtime > self._lastnormaltime:
514 if mtime > self._lastnormaltime:
515 # Remember the most recent modification timeslot for status(),
515 # Remember the most recent modification timeslot for status(),
516 # to make sure we won't miss future size-preserving file content
516 # to make sure we won't miss future size-preserving file content
517 # modifications that happen within the same timeslot.
517 # modifications that happen within the same timeslot.
518 self._lastnormaltime = mtime
518 self._lastnormaltime = mtime
519
519
520 @requires_no_parents_change
520 @requires_no_parents_change
521 def set_possibly_dirty(self, filename):
521 def set_possibly_dirty(self, filename):
522 """record that the current state of the file on disk is unknown"""
522 """record that the current state of the file on disk is unknown"""
523 self._dirty = True
523 self._dirty = True
524 self._updatedfiles.add(filename)
524 self._updatedfiles.add(filename)
525 self._map.set_possibly_dirty(filename)
525 self._map.set_possibly_dirty(filename)
526
526
527 @requires_parents_change
527 @requires_parents_change
528 def update_file_p1(
528 def update_file_p1(
529 self,
529 self,
530 filename,
530 filename,
531 p1_tracked,
531 p1_tracked,
532 ):
532 ):
533 """Set a file as tracked in the parent (or not)
533 """Set a file as tracked in the parent (or not)
534
534
535 This is to be called when adjust the dirstate to a new parent after an history
535 This is to be called when adjust the dirstate to a new parent after an history
536 rewriting operation.
536 rewriting operation.
537
537
538 It should not be called during a merge (p2 != nullid) and only within
538 It should not be called during a merge (p2 != nullid) and only within
539 a `with dirstate.parentchange():` context.
539 a `with dirstate.parentchange():` context.
540 """
540 """
541 if self.in_merge:
541 if self.in_merge:
542 msg = b'update_file_reference should not be called when merging'
542 msg = b'update_file_reference should not be called when merging'
543 raise error.ProgrammingError(msg)
543 raise error.ProgrammingError(msg)
544 entry = self._map.get(filename)
544 entry = self._map.get(filename)
545 if entry is None:
545 if entry is None:
546 wc_tracked = False
546 wc_tracked = False
547 else:
547 else:
548 wc_tracked = entry.tracked
548 wc_tracked = entry.tracked
549 possibly_dirty = False
549 possibly_dirty = False
550 if p1_tracked and wc_tracked:
550 if p1_tracked and wc_tracked:
551 # the underlying reference might have changed, we will have to
551 # the underlying reference might have changed, we will have to
552 # check it.
552 # check it.
553 possibly_dirty = True
553 possibly_dirty = True
554 elif not (p1_tracked or wc_tracked):
554 elif not (p1_tracked or wc_tracked):
555 # the file is no longer relevant to anyone
555 # the file is no longer relevant to anyone
556 self._drop(filename)
556 self._drop(filename)
557 elif (not p1_tracked) and wc_tracked:
557 elif (not p1_tracked) and wc_tracked:
558 if entry is not None and entry.added:
558 if entry is not None and entry.added:
559 return # avoid dropping copy information (maybe?)
559 return # avoid dropping copy information (maybe?)
560 elif p1_tracked and not wc_tracked:
560 elif p1_tracked and not wc_tracked:
561 pass
561 pass
562 else:
562 else:
563 assert False, 'unreachable'
563 assert False, 'unreachable'
564
564
565 # this mean we are doing call for file we do not really care about the
565 # this mean we are doing call for file we do not really care about the
566 # data (eg: added or removed), however this should be a minor overhead
566 # data (eg: added or removed), however this should be a minor overhead
567 # compared to the overall update process calling this.
567 # compared to the overall update process calling this.
568 parentfiledata = None
568 parentfiledata = None
569 if wc_tracked:
569 if wc_tracked:
570 parentfiledata = self._get_filedata(filename)
570 parentfiledata = self._get_filedata(filename)
571
571
572 self._updatedfiles.add(filename)
572 self._updatedfiles.add(filename)
573 self._map.reset_state(
573 self._map.reset_state(
574 filename,
574 filename,
575 wc_tracked,
575 wc_tracked,
576 p1_tracked,
576 p1_tracked,
577 possibly_dirty=possibly_dirty,
577 possibly_dirty=possibly_dirty,
578 parentfiledata=parentfiledata,
578 parentfiledata=parentfiledata,
579 )
579 )
580 if (
580 if (
581 parentfiledata is not None
581 parentfiledata is not None
582 and parentfiledata[2] > self._lastnormaltime
582 and parentfiledata[2] > self._lastnormaltime
583 ):
583 ):
584 # Remember the most recent modification timeslot for status(),
584 # Remember the most recent modification timeslot for status(),
585 # to make sure we won't miss future size-preserving file content
585 # to make sure we won't miss future size-preserving file content
586 # modifications that happen within the same timeslot.
586 # modifications that happen within the same timeslot.
587 self._lastnormaltime = parentfiledata[2]
587 self._lastnormaltime = parentfiledata[2]
588
588
589 @requires_parents_change
589 @requires_parents_change
590 def update_file(
590 def update_file(
591 self,
591 self,
592 filename,
592 filename,
593 wc_tracked,
593 wc_tracked,
594 p1_tracked,
594 p1_tracked,
595 p2_tracked=False,
595 p2_tracked=False,
596 merged=False,
596 merged=False,
597 clean_p1=False,
597 clean_p1=False,
598 clean_p2=False,
598 clean_p2=False,
599 possibly_dirty=False,
599 possibly_dirty=False,
600 parentfiledata=None,
600 parentfiledata=None,
601 ):
601 ):
602 """update the information about a file in the dirstate
602 """update the information about a file in the dirstate
603
603
604 This is to be called when the direstates parent changes to keep track
604 This is to be called when the direstates parent changes to keep track
605 of what is the file situation in regards to the working copy and its parent.
605 of what is the file situation in regards to the working copy and its parent.
606
606
607 This function must be called within a `dirstate.parentchange` context.
607 This function must be called within a `dirstate.parentchange` context.
608
608
609 note: the API is at an early stage and we might need to adjust it
609 note: the API is at an early stage and we might need to adjust it
610 depending of what information ends up being relevant and useful to
610 depending of what information ends up being relevant and useful to
611 other processing.
611 other processing.
612 """
612 """
613 if merged and (clean_p1 or clean_p2):
613 if merged and (clean_p1 or clean_p2):
614 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
614 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
615 raise error.ProgrammingError(msg)
615 raise error.ProgrammingError(msg)
616
616
617 # note: I do not think we need to double check name clash here since we
617 # note: I do not think we need to double check name clash here since we
618 # are in a update/merge case that should already have taken care of
618 # are in a update/merge case that should already have taken care of
619 # this. The test agrees
619 # this. The test agrees
620
620
621 self._dirty = True
621 self._dirty = True
622 self._updatedfiles.add(filename)
622 self._updatedfiles.add(filename)
623
623
624 need_parent_file_data = (
624 need_parent_file_data = (
625 not (possibly_dirty or clean_p2 or merged)
625 not (possibly_dirty or clean_p2 or merged)
626 and wc_tracked
626 and wc_tracked
627 and p1_tracked
627 and p1_tracked
628 )
628 )
629
629
630 # this mean we are doing call for file we do not really care about the
630 # this mean we are doing call for file we do not really care about the
631 # data (eg: added or removed), however this should be a minor overhead
631 # data (eg: added or removed), however this should be a minor overhead
632 # compared to the overall update process calling this.
632 # compared to the overall update process calling this.
633 if need_parent_file_data:
633 if need_parent_file_data:
634 if parentfiledata is None:
634 if parentfiledata is None:
635 parentfiledata = self._get_filedata(filename)
635 parentfiledata = self._get_filedata(filename)
636 mtime = parentfiledata[2]
636 mtime = parentfiledata[2]
637
637
638 if mtime > self._lastnormaltime:
638 if mtime > self._lastnormaltime:
639 # Remember the most recent modification timeslot for
639 # Remember the most recent modification timeslot for
640 # status(), to make sure we won't miss future
640 # status(), to make sure we won't miss future
641 # size-preserving file content modifications that happen
641 # size-preserving file content modifications that happen
642 # within the same timeslot.
642 # within the same timeslot.
643 self._lastnormaltime = mtime
643 self._lastnormaltime = mtime
644
644
645 self._map.reset_state(
645 self._map.reset_state(
646 filename,
646 filename,
647 wc_tracked,
647 wc_tracked,
648 p1_tracked,
648 p1_tracked,
649 p2_tracked=p2_tracked,
649 p2_tracked=p2_tracked,
650 merged=merged,
650 merged=merged,
651 clean_p1=clean_p1,
651 clean_p1=clean_p1,
652 clean_p2=clean_p2,
652 clean_p2=clean_p2,
653 possibly_dirty=possibly_dirty,
653 possibly_dirty=possibly_dirty,
654 parentfiledata=parentfiledata,
654 parentfiledata=parentfiledata,
655 )
655 )
656 if (
656 if (
657 parentfiledata is not None
657 parentfiledata is not None
658 and parentfiledata[2] > self._lastnormaltime
658 and parentfiledata[2] > self._lastnormaltime
659 ):
659 ):
660 # Remember the most recent modification timeslot for status(),
660 # Remember the most recent modification timeslot for status(),
661 # to make sure we won't miss future size-preserving file content
661 # to make sure we won't miss future size-preserving file content
662 # modifications that happen within the same timeslot.
662 # modifications that happen within the same timeslot.
663 self._lastnormaltime = parentfiledata[2]
663 self._lastnormaltime = parentfiledata[2]
664
664
665 def _addpath(
665 def _addpath(
666 self,
666 self,
667 f,
667 f,
668 mode=0,
668 mode=0,
669 size=None,
669 size=None,
670 mtime=None,
670 mtime=None,
671 added=False,
671 added=False,
672 merged=False,
672 merged=False,
673 from_p2=False,
673 from_p2=False,
674 possibly_dirty=False,
674 possibly_dirty=False,
675 ):
675 ):
676 entry = self._map.get(f)
676 entry = self._map.get(f)
677 if added or entry is not None and entry.removed:
677 if added or entry is not None and not entry.tracked:
678 self._check_new_tracked_filename(f)
678 self._check_new_tracked_filename(f)
679 self._dirty = True
679 self._dirty = True
680 self._updatedfiles.add(f)
680 self._updatedfiles.add(f)
681 self._map.addfile(
681 self._map.addfile(
682 f,
682 f,
683 mode=mode,
683 mode=mode,
684 size=size,
684 size=size,
685 mtime=mtime,
685 mtime=mtime,
686 added=added,
686 added=added,
687 merged=merged,
687 merged=merged,
688 from_p2=from_p2,
688 from_p2=from_p2,
689 possibly_dirty=possibly_dirty,
689 possibly_dirty=possibly_dirty,
690 )
690 )
691
691
692 def _check_new_tracked_filename(self, filename):
692 def _check_new_tracked_filename(self, filename):
693 scmutil.checkfilename(filename)
693 scmutil.checkfilename(filename)
694 if self._map.hastrackeddir(filename):
694 if self._map.hastrackeddir(filename):
695 msg = _(b'directory %r already in dirstate')
695 msg = _(b'directory %r already in dirstate')
696 msg %= pycompat.bytestr(filename)
696 msg %= pycompat.bytestr(filename)
697 raise error.Abort(msg)
697 raise error.Abort(msg)
698 # shadows
698 # shadows
699 for d in pathutil.finddirs(filename):
699 for d in pathutil.finddirs(filename):
700 if self._map.hastrackeddir(d):
700 if self._map.hastrackeddir(d):
701 break
701 break
702 entry = self._map.get(d)
702 entry = self._map.get(d)
703 if entry is not None and not entry.removed:
703 if entry is not None and not entry.removed:
704 msg = _(b'file %r in dirstate clashes with %r')
704 msg = _(b'file %r in dirstate clashes with %r')
705 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
705 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
706 raise error.Abort(msg)
706 raise error.Abort(msg)
707
707
708 def _get_filedata(self, filename):
708 def _get_filedata(self, filename):
709 """returns"""
709 """returns"""
710 s = os.lstat(self._join(filename))
710 s = os.lstat(self._join(filename))
711 mode = s.st_mode
711 mode = s.st_mode
712 size = s.st_size
712 size = s.st_size
713 mtime = s[stat.ST_MTIME]
713 mtime = s[stat.ST_MTIME]
714 return (mode, size, mtime)
714 return (mode, size, mtime)
715
715
716 def _normallookup(self, f):
716 def _normallookup(self, f):
717 '''Mark a file normal, but possibly dirty.'''
717 '''Mark a file normal, but possibly dirty.'''
718 if self.in_merge:
718 if self.in_merge:
719 # if there is a merge going on and the file was either
719 # if there is a merge going on and the file was either
720 # "merged" or coming from other parent (-2) before
720 # "merged" or coming from other parent (-2) before
721 # being removed, restore that state.
721 # being removed, restore that state.
722 entry = self._map.get(f)
722 entry = self._map.get(f)
723 if entry is not None:
723 if entry is not None:
724 # XXX this should probably be dealt with a a lower level
724 # XXX this should probably be dealt with a a lower level
725 # (see `merged_removed` and `from_p2_removed`)
725 # (see `merged_removed` and `from_p2_removed`)
726 if entry.merged_removed or entry.from_p2_removed:
726 if entry.merged_removed or entry.from_p2_removed:
727 source = self._map.copymap.get(f)
727 source = self._map.copymap.get(f)
728 self._addpath(f, from_p2=True)
728 self._addpath(f, from_p2=True)
729 self._map.copymap.pop(f, None)
729 self._map.copymap.pop(f, None)
730 if source is not None:
730 if source is not None:
731 self.copy(source, f)
731 self.copy(source, f)
732 return
732 return
733 elif entry.merged or entry.from_p2:
733 elif entry.merged or entry.from_p2:
734 return
734 return
735 self._addpath(f, possibly_dirty=True)
735 self._addpath(f, possibly_dirty=True)
736 self._map.copymap.pop(f, None)
736 self._map.copymap.pop(f, None)
737
737
738 def _add(self, filename):
738 def _add(self, filename):
739 """internal function to mark a file as added"""
739 """internal function to mark a file as added"""
740 self._addpath(filename, added=True)
740 self._addpath(filename, added=True)
741 self._map.copymap.pop(filename, None)
741 self._map.copymap.pop(filename, None)
742
742
743 def _drop(self, filename):
743 def _drop(self, filename):
744 """internal function to drop a file from the dirstate"""
744 """internal function to drop a file from the dirstate"""
745 if self._map.dropfile(filename):
745 if self._map.dropfile(filename):
746 self._dirty = True
746 self._dirty = True
747 self._updatedfiles.add(filename)
747 self._updatedfiles.add(filename)
748
748
749 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
749 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
750 if exists is None:
750 if exists is None:
751 exists = os.path.lexists(os.path.join(self._root, path))
751 exists = os.path.lexists(os.path.join(self._root, path))
752 if not exists:
752 if not exists:
753 # Maybe a path component exists
753 # Maybe a path component exists
754 if not ignoremissing and b'/' in path:
754 if not ignoremissing and b'/' in path:
755 d, f = path.rsplit(b'/', 1)
755 d, f = path.rsplit(b'/', 1)
756 d = self._normalize(d, False, ignoremissing, None)
756 d = self._normalize(d, False, ignoremissing, None)
757 folded = d + b"/" + f
757 folded = d + b"/" + f
758 else:
758 else:
759 # No path components, preserve original case
759 # No path components, preserve original case
760 folded = path
760 folded = path
761 else:
761 else:
762 # recursively normalize leading directory components
762 # recursively normalize leading directory components
763 # against dirstate
763 # against dirstate
764 if b'/' in normed:
764 if b'/' in normed:
765 d, f = normed.rsplit(b'/', 1)
765 d, f = normed.rsplit(b'/', 1)
766 d = self._normalize(d, False, ignoremissing, True)
766 d = self._normalize(d, False, ignoremissing, True)
767 r = self._root + b"/" + d
767 r = self._root + b"/" + d
768 folded = d + b"/" + util.fspath(f, r)
768 folded = d + b"/" + util.fspath(f, r)
769 else:
769 else:
770 folded = util.fspath(normed, self._root)
770 folded = util.fspath(normed, self._root)
771 storemap[normed] = folded
771 storemap[normed] = folded
772
772
773 return folded
773 return folded
774
774
775 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
775 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
776 normed = util.normcase(path)
776 normed = util.normcase(path)
777 folded = self._map.filefoldmap.get(normed, None)
777 folded = self._map.filefoldmap.get(normed, None)
778 if folded is None:
778 if folded is None:
779 if isknown:
779 if isknown:
780 folded = path
780 folded = path
781 else:
781 else:
782 folded = self._discoverpath(
782 folded = self._discoverpath(
783 path, normed, ignoremissing, exists, self._map.filefoldmap
783 path, normed, ignoremissing, exists, self._map.filefoldmap
784 )
784 )
785 return folded
785 return folded
786
786
787 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
787 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
788 normed = util.normcase(path)
788 normed = util.normcase(path)
789 folded = self._map.filefoldmap.get(normed, None)
789 folded = self._map.filefoldmap.get(normed, None)
790 if folded is None:
790 if folded is None:
791 folded = self._map.dirfoldmap.get(normed, None)
791 folded = self._map.dirfoldmap.get(normed, None)
792 if folded is None:
792 if folded is None:
793 if isknown:
793 if isknown:
794 folded = path
794 folded = path
795 else:
795 else:
796 # store discovered result in dirfoldmap so that future
796 # store discovered result in dirfoldmap so that future
797 # normalizefile calls don't start matching directories
797 # normalizefile calls don't start matching directories
798 folded = self._discoverpath(
798 folded = self._discoverpath(
799 path, normed, ignoremissing, exists, self._map.dirfoldmap
799 path, normed, ignoremissing, exists, self._map.dirfoldmap
800 )
800 )
801 return folded
801 return folded
802
802
803 def normalize(self, path, isknown=False, ignoremissing=False):
803 def normalize(self, path, isknown=False, ignoremissing=False):
804 """
804 """
805 normalize the case of a pathname when on a casefolding filesystem
805 normalize the case of a pathname when on a casefolding filesystem
806
806
807 isknown specifies whether the filename came from walking the
807 isknown specifies whether the filename came from walking the
808 disk, to avoid extra filesystem access.
808 disk, to avoid extra filesystem access.
809
809
810 If ignoremissing is True, missing path are returned
810 If ignoremissing is True, missing path are returned
811 unchanged. Otherwise, we try harder to normalize possibly
811 unchanged. Otherwise, we try harder to normalize possibly
812 existing path components.
812 existing path components.
813
813
814 The normalized case is determined based on the following precedence:
814 The normalized case is determined based on the following precedence:
815
815
816 - version of name already stored in the dirstate
816 - version of name already stored in the dirstate
817 - version of name stored on disk
817 - version of name stored on disk
818 - version provided via command arguments
818 - version provided via command arguments
819 """
819 """
820
820
821 if self._checkcase:
821 if self._checkcase:
822 return self._normalize(path, isknown, ignoremissing)
822 return self._normalize(path, isknown, ignoremissing)
823 return path
823 return path
824
824
825 def clear(self):
825 def clear(self):
826 self._map.clear()
826 self._map.clear()
827 self._lastnormaltime = 0
827 self._lastnormaltime = 0
828 self._updatedfiles.clear()
828 self._updatedfiles.clear()
829 self._dirty = True
829 self._dirty = True
830
830
831 def rebuild(self, parent, allfiles, changedfiles=None):
831 def rebuild(self, parent, allfiles, changedfiles=None):
832 if changedfiles is None:
832 if changedfiles is None:
833 # Rebuild entire dirstate
833 # Rebuild entire dirstate
834 to_lookup = allfiles
834 to_lookup = allfiles
835 to_drop = []
835 to_drop = []
836 lastnormaltime = self._lastnormaltime
836 lastnormaltime = self._lastnormaltime
837 self.clear()
837 self.clear()
838 self._lastnormaltime = lastnormaltime
838 self._lastnormaltime = lastnormaltime
839 elif len(changedfiles) < 10:
839 elif len(changedfiles) < 10:
840 # Avoid turning allfiles into a set, which can be expensive if it's
840 # Avoid turning allfiles into a set, which can be expensive if it's
841 # large.
841 # large.
842 to_lookup = []
842 to_lookup = []
843 to_drop = []
843 to_drop = []
844 for f in changedfiles:
844 for f in changedfiles:
845 if f in allfiles:
845 if f in allfiles:
846 to_lookup.append(f)
846 to_lookup.append(f)
847 else:
847 else:
848 to_drop.append(f)
848 to_drop.append(f)
849 else:
849 else:
850 changedfilesset = set(changedfiles)
850 changedfilesset = set(changedfiles)
851 to_lookup = changedfilesset & set(allfiles)
851 to_lookup = changedfilesset & set(allfiles)
852 to_drop = changedfilesset - to_lookup
852 to_drop = changedfilesset - to_lookup
853
853
854 if self._origpl is None:
854 if self._origpl is None:
855 self._origpl = self._pl
855 self._origpl = self._pl
856 self._map.setparents(parent, self._nodeconstants.nullid)
856 self._map.setparents(parent, self._nodeconstants.nullid)
857
857
858 for f in to_lookup:
858 for f in to_lookup:
859 self._normallookup(f)
859 self._normallookup(f)
860 for f in to_drop:
860 for f in to_drop:
861 self._drop(f)
861 self._drop(f)
862
862
863 self._dirty = True
863 self._dirty = True
864
864
865 def identity(self):
865 def identity(self):
866 """Return identity of dirstate itself to detect changing in storage
866 """Return identity of dirstate itself to detect changing in storage
867
867
868 If identity of previous dirstate is equal to this, writing
868 If identity of previous dirstate is equal to this, writing
869 changes based on the former dirstate out can keep consistency.
869 changes based on the former dirstate out can keep consistency.
870 """
870 """
871 return self._map.identity
871 return self._map.identity
872
872
873 def write(self, tr):
873 def write(self, tr):
874 if not self._dirty:
874 if not self._dirty:
875 return
875 return
876
876
877 filename = self._filename
877 filename = self._filename
878 if tr:
878 if tr:
879 # 'dirstate.write()' is not only for writing in-memory
879 # 'dirstate.write()' is not only for writing in-memory
880 # changes out, but also for dropping ambiguous timestamp.
880 # changes out, but also for dropping ambiguous timestamp.
881 # delayed writing re-raise "ambiguous timestamp issue".
881 # delayed writing re-raise "ambiguous timestamp issue".
882 # See also the wiki page below for detail:
882 # See also the wiki page below for detail:
883 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
883 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
884
884
885 # emulate dropping timestamp in 'parsers.pack_dirstate'
885 # emulate dropping timestamp in 'parsers.pack_dirstate'
886 now = _getfsnow(self._opener)
886 now = _getfsnow(self._opener)
887 self._map.clearambiguoustimes(self._updatedfiles, now)
887 self._map.clearambiguoustimes(self._updatedfiles, now)
888
888
889 # emulate that all 'dirstate.normal' results are written out
889 # emulate that all 'dirstate.normal' results are written out
890 self._lastnormaltime = 0
890 self._lastnormaltime = 0
891 self._updatedfiles.clear()
891 self._updatedfiles.clear()
892
892
893 # delay writing in-memory changes out
893 # delay writing in-memory changes out
894 tr.addfilegenerator(
894 tr.addfilegenerator(
895 b'dirstate',
895 b'dirstate',
896 (self._filename,),
896 (self._filename,),
897 lambda f: self._writedirstate(tr, f),
897 lambda f: self._writedirstate(tr, f),
898 location=b'plain',
898 location=b'plain',
899 )
899 )
900 return
900 return
901
901
902 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
902 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
903 self._writedirstate(tr, st)
903 self._writedirstate(tr, st)
904
904
905 def addparentchangecallback(self, category, callback):
905 def addparentchangecallback(self, category, callback):
906 """add a callback to be called when the wd parents are changed
906 """add a callback to be called when the wd parents are changed
907
907
908 Callback will be called with the following arguments:
908 Callback will be called with the following arguments:
909 dirstate, (oldp1, oldp2), (newp1, newp2)
909 dirstate, (oldp1, oldp2), (newp1, newp2)
910
910
911 Category is a unique identifier to allow overwriting an old callback
911 Category is a unique identifier to allow overwriting an old callback
912 with a newer callback.
912 with a newer callback.
913 """
913 """
914 self._plchangecallbacks[category] = callback
914 self._plchangecallbacks[category] = callback
915
915
916 def _writedirstate(self, tr, st):
916 def _writedirstate(self, tr, st):
917 # notify callbacks about parents change
917 # notify callbacks about parents change
918 if self._origpl is not None and self._origpl != self._pl:
918 if self._origpl is not None and self._origpl != self._pl:
919 for c, callback in sorted(
919 for c, callback in sorted(
920 pycompat.iteritems(self._plchangecallbacks)
920 pycompat.iteritems(self._plchangecallbacks)
921 ):
921 ):
922 callback(self, self._origpl, self._pl)
922 callback(self, self._origpl, self._pl)
923 self._origpl = None
923 self._origpl = None
924 # use the modification time of the newly created temporary file as the
924 # use the modification time of the newly created temporary file as the
925 # filesystem's notion of 'now'
925 # filesystem's notion of 'now'
926 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
926 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
927
927
928 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
928 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
929 # timestamp of each entries in dirstate, because of 'now > mtime'
929 # timestamp of each entries in dirstate, because of 'now > mtime'
930 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
930 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
931 if delaywrite > 0:
931 if delaywrite > 0:
932 # do we have any files to delay for?
932 # do we have any files to delay for?
933 for f, e in pycompat.iteritems(self._map):
933 for f, e in pycompat.iteritems(self._map):
934 if e.need_delay(now):
934 if e.need_delay(now):
935 import time # to avoid useless import
935 import time # to avoid useless import
936
936
937 # rather than sleep n seconds, sleep until the next
937 # rather than sleep n seconds, sleep until the next
938 # multiple of n seconds
938 # multiple of n seconds
939 clock = time.time()
939 clock = time.time()
940 start = int(clock) - (int(clock) % delaywrite)
940 start = int(clock) - (int(clock) % delaywrite)
941 end = start + delaywrite
941 end = start + delaywrite
942 time.sleep(end - clock)
942 time.sleep(end - clock)
943 now = end # trust our estimate that the end is near now
943 now = end # trust our estimate that the end is near now
944 break
944 break
945
945
946 self._map.write(tr, st, now)
946 self._map.write(tr, st, now)
947 self._lastnormaltime = 0
947 self._lastnormaltime = 0
948 self._dirty = False
948 self._dirty = False
949
949
950 def _dirignore(self, f):
950 def _dirignore(self, f):
951 if self._ignore(f):
951 if self._ignore(f):
952 return True
952 return True
953 for p in pathutil.finddirs(f):
953 for p in pathutil.finddirs(f):
954 if self._ignore(p):
954 if self._ignore(p):
955 return True
955 return True
956 return False
956 return False
957
957
958 def _ignorefiles(self):
958 def _ignorefiles(self):
959 files = []
959 files = []
960 if os.path.exists(self._join(b'.hgignore')):
960 if os.path.exists(self._join(b'.hgignore')):
961 files.append(self._join(b'.hgignore'))
961 files.append(self._join(b'.hgignore'))
962 for name, path in self._ui.configitems(b"ui"):
962 for name, path in self._ui.configitems(b"ui"):
963 if name == b'ignore' or name.startswith(b'ignore.'):
963 if name == b'ignore' or name.startswith(b'ignore.'):
964 # we need to use os.path.join here rather than self._join
964 # we need to use os.path.join here rather than self._join
965 # because path is arbitrary and user-specified
965 # because path is arbitrary and user-specified
966 files.append(os.path.join(self._rootdir, util.expandpath(path)))
966 files.append(os.path.join(self._rootdir, util.expandpath(path)))
967 return files
967 return files
968
968
969 def _ignorefileandline(self, f):
969 def _ignorefileandline(self, f):
970 files = collections.deque(self._ignorefiles())
970 files = collections.deque(self._ignorefiles())
971 visited = set()
971 visited = set()
972 while files:
972 while files:
973 i = files.popleft()
973 i = files.popleft()
974 patterns = matchmod.readpatternfile(
974 patterns = matchmod.readpatternfile(
975 i, self._ui.warn, sourceinfo=True
975 i, self._ui.warn, sourceinfo=True
976 )
976 )
977 for pattern, lineno, line in patterns:
977 for pattern, lineno, line in patterns:
978 kind, p = matchmod._patsplit(pattern, b'glob')
978 kind, p = matchmod._patsplit(pattern, b'glob')
979 if kind == b"subinclude":
979 if kind == b"subinclude":
980 if p not in visited:
980 if p not in visited:
981 files.append(p)
981 files.append(p)
982 continue
982 continue
983 m = matchmod.match(
983 m = matchmod.match(
984 self._root, b'', [], [pattern], warn=self._ui.warn
984 self._root, b'', [], [pattern], warn=self._ui.warn
985 )
985 )
986 if m(f):
986 if m(f):
987 return (i, lineno, line)
987 return (i, lineno, line)
988 visited.add(i)
988 visited.add(i)
989 return (None, -1, b"")
989 return (None, -1, b"")
990
990
991 def _walkexplicit(self, match, subrepos):
991 def _walkexplicit(self, match, subrepos):
992 """Get stat data about the files explicitly specified by match.
992 """Get stat data about the files explicitly specified by match.
993
993
994 Return a triple (results, dirsfound, dirsnotfound).
994 Return a triple (results, dirsfound, dirsnotfound).
995 - results is a mapping from filename to stat result. It also contains
995 - results is a mapping from filename to stat result. It also contains
996 listings mapping subrepos and .hg to None.
996 listings mapping subrepos and .hg to None.
997 - dirsfound is a list of files found to be directories.
997 - dirsfound is a list of files found to be directories.
998 - dirsnotfound is a list of files that the dirstate thinks are
998 - dirsnotfound is a list of files that the dirstate thinks are
999 directories and that were not found."""
999 directories and that were not found."""
1000
1000
1001 def badtype(mode):
1001 def badtype(mode):
1002 kind = _(b'unknown')
1002 kind = _(b'unknown')
1003 if stat.S_ISCHR(mode):
1003 if stat.S_ISCHR(mode):
1004 kind = _(b'character device')
1004 kind = _(b'character device')
1005 elif stat.S_ISBLK(mode):
1005 elif stat.S_ISBLK(mode):
1006 kind = _(b'block device')
1006 kind = _(b'block device')
1007 elif stat.S_ISFIFO(mode):
1007 elif stat.S_ISFIFO(mode):
1008 kind = _(b'fifo')
1008 kind = _(b'fifo')
1009 elif stat.S_ISSOCK(mode):
1009 elif stat.S_ISSOCK(mode):
1010 kind = _(b'socket')
1010 kind = _(b'socket')
1011 elif stat.S_ISDIR(mode):
1011 elif stat.S_ISDIR(mode):
1012 kind = _(b'directory')
1012 kind = _(b'directory')
1013 return _(b'unsupported file type (type is %s)') % kind
1013 return _(b'unsupported file type (type is %s)') % kind
1014
1014
1015 badfn = match.bad
1015 badfn = match.bad
1016 dmap = self._map
1016 dmap = self._map
1017 lstat = os.lstat
1017 lstat = os.lstat
1018 getkind = stat.S_IFMT
1018 getkind = stat.S_IFMT
1019 dirkind = stat.S_IFDIR
1019 dirkind = stat.S_IFDIR
1020 regkind = stat.S_IFREG
1020 regkind = stat.S_IFREG
1021 lnkkind = stat.S_IFLNK
1021 lnkkind = stat.S_IFLNK
1022 join = self._join
1022 join = self._join
1023 dirsfound = []
1023 dirsfound = []
1024 foundadd = dirsfound.append
1024 foundadd = dirsfound.append
1025 dirsnotfound = []
1025 dirsnotfound = []
1026 notfoundadd = dirsnotfound.append
1026 notfoundadd = dirsnotfound.append
1027
1027
1028 if not match.isexact() and self._checkcase:
1028 if not match.isexact() and self._checkcase:
1029 normalize = self._normalize
1029 normalize = self._normalize
1030 else:
1030 else:
1031 normalize = None
1031 normalize = None
1032
1032
1033 files = sorted(match.files())
1033 files = sorted(match.files())
1034 subrepos.sort()
1034 subrepos.sort()
1035 i, j = 0, 0
1035 i, j = 0, 0
1036 while i < len(files) and j < len(subrepos):
1036 while i < len(files) and j < len(subrepos):
1037 subpath = subrepos[j] + b"/"
1037 subpath = subrepos[j] + b"/"
1038 if files[i] < subpath:
1038 if files[i] < subpath:
1039 i += 1
1039 i += 1
1040 continue
1040 continue
1041 while i < len(files) and files[i].startswith(subpath):
1041 while i < len(files) and files[i].startswith(subpath):
1042 del files[i]
1042 del files[i]
1043 j += 1
1043 j += 1
1044
1044
1045 if not files or b'' in files:
1045 if not files or b'' in files:
1046 files = [b'']
1046 files = [b'']
1047 # constructing the foldmap is expensive, so don't do it for the
1047 # constructing the foldmap is expensive, so don't do it for the
1048 # common case where files is ['']
1048 # common case where files is ['']
1049 normalize = None
1049 normalize = None
1050 results = dict.fromkeys(subrepos)
1050 results = dict.fromkeys(subrepos)
1051 results[b'.hg'] = None
1051 results[b'.hg'] = None
1052
1052
1053 for ff in files:
1053 for ff in files:
1054 if normalize:
1054 if normalize:
1055 nf = normalize(ff, False, True)
1055 nf = normalize(ff, False, True)
1056 else:
1056 else:
1057 nf = ff
1057 nf = ff
1058 if nf in results:
1058 if nf in results:
1059 continue
1059 continue
1060
1060
1061 try:
1061 try:
1062 st = lstat(join(nf))
1062 st = lstat(join(nf))
1063 kind = getkind(st.st_mode)
1063 kind = getkind(st.st_mode)
1064 if kind == dirkind:
1064 if kind == dirkind:
1065 if nf in dmap:
1065 if nf in dmap:
1066 # file replaced by dir on disk but still in dirstate
1066 # file replaced by dir on disk but still in dirstate
1067 results[nf] = None
1067 results[nf] = None
1068 foundadd((nf, ff))
1068 foundadd((nf, ff))
1069 elif kind == regkind or kind == lnkkind:
1069 elif kind == regkind or kind == lnkkind:
1070 results[nf] = st
1070 results[nf] = st
1071 else:
1071 else:
1072 badfn(ff, badtype(kind))
1072 badfn(ff, badtype(kind))
1073 if nf in dmap:
1073 if nf in dmap:
1074 results[nf] = None
1074 results[nf] = None
1075 except OSError as inst: # nf not found on disk - it is dirstate only
1075 except OSError as inst: # nf not found on disk - it is dirstate only
1076 if nf in dmap: # does it exactly match a missing file?
1076 if nf in dmap: # does it exactly match a missing file?
1077 results[nf] = None
1077 results[nf] = None
1078 else: # does it match a missing directory?
1078 else: # does it match a missing directory?
1079 if self._map.hasdir(nf):
1079 if self._map.hasdir(nf):
1080 notfoundadd(nf)
1080 notfoundadd(nf)
1081 else:
1081 else:
1082 badfn(ff, encoding.strtolocal(inst.strerror))
1082 badfn(ff, encoding.strtolocal(inst.strerror))
1083
1083
1084 # match.files() may contain explicitly-specified paths that shouldn't
1084 # match.files() may contain explicitly-specified paths that shouldn't
1085 # be taken; drop them from the list of files found. dirsfound/notfound
1085 # be taken; drop them from the list of files found. dirsfound/notfound
1086 # aren't filtered here because they will be tested later.
1086 # aren't filtered here because they will be tested later.
1087 if match.anypats():
1087 if match.anypats():
1088 for f in list(results):
1088 for f in list(results):
1089 if f == b'.hg' or f in subrepos:
1089 if f == b'.hg' or f in subrepos:
1090 # keep sentinel to disable further out-of-repo walks
1090 # keep sentinel to disable further out-of-repo walks
1091 continue
1091 continue
1092 if not match(f):
1092 if not match(f):
1093 del results[f]
1093 del results[f]
1094
1094
1095 # Case insensitive filesystems cannot rely on lstat() failing to detect
1095 # Case insensitive filesystems cannot rely on lstat() failing to detect
1096 # a case-only rename. Prune the stat object for any file that does not
1096 # a case-only rename. Prune the stat object for any file that does not
1097 # match the case in the filesystem, if there are multiple files that
1097 # match the case in the filesystem, if there are multiple files that
1098 # normalize to the same path.
1098 # normalize to the same path.
1099 if match.isexact() and self._checkcase:
1099 if match.isexact() and self._checkcase:
1100 normed = {}
1100 normed = {}
1101
1101
1102 for f, st in pycompat.iteritems(results):
1102 for f, st in pycompat.iteritems(results):
1103 if st is None:
1103 if st is None:
1104 continue
1104 continue
1105
1105
1106 nc = util.normcase(f)
1106 nc = util.normcase(f)
1107 paths = normed.get(nc)
1107 paths = normed.get(nc)
1108
1108
1109 if paths is None:
1109 if paths is None:
1110 paths = set()
1110 paths = set()
1111 normed[nc] = paths
1111 normed[nc] = paths
1112
1112
1113 paths.add(f)
1113 paths.add(f)
1114
1114
1115 for norm, paths in pycompat.iteritems(normed):
1115 for norm, paths in pycompat.iteritems(normed):
1116 if len(paths) > 1:
1116 if len(paths) > 1:
1117 for path in paths:
1117 for path in paths:
1118 folded = self._discoverpath(
1118 folded = self._discoverpath(
1119 path, norm, True, None, self._map.dirfoldmap
1119 path, norm, True, None, self._map.dirfoldmap
1120 )
1120 )
1121 if path != folded:
1121 if path != folded:
1122 results[path] = None
1122 results[path] = None
1123
1123
1124 return results, dirsfound, dirsnotfound
1124 return results, dirsfound, dirsnotfound
1125
1125
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        Entries listed in ``subrepos`` and the ``.hg`` directory are always
        removed from the result before returning.
        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Pick the ignore predicates: listing ignored files disables
        # ignoring entirely; listing neither unknown nor ignored files means
        # everything on disk outside the dirstate can be skipped.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Bind frequently-used attributes/functions to locals for the hot
        # traversal loop below.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        # Decide which of the walk steps can be skipped for this matcher.
        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        # On case-preserving/insensitive filesystems (self._checkcase) names
        # read from disk must be normalized before comparing against dmap.
        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # Unreadable or vanished directories are reported through
                    # the matcher's bad() callback; anything else is fatal.
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            # a dirstate entry shadowed by a directory on disk
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # tracked file replaced by something of another
                            # kind (e.g. fifo/socket) on disk
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # Subrepo roots and '.hg' never belong in the walk results.
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1313
1313
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status through the Rust fast path.

        Returns the same ``(lookup, status)`` pair as the pure-Python path
        in ``status()``.  May raise ``rustmod.FallbackError`` (the caller in
        ``status()`` catches it and falls back to the Python implementation).
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # The Rust call may have updated the dirstate map; propagate its
        # dirtiness flag so the changes get written out.
        self._dirty |= dirty

        # Replay directory traversal notifications for matchers that want
        # them (the Rust side only records the visited directories).
        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        # Surface ignore-file problems collected by the Rust side: tuples are
        # (file, invalid syntax) pairs, bare items are unreadable files.
        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1392
1392
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # The boolean parameters are rebound below as result lists, so stash
        # the flags first under list* names.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # Try the Rust fast path unless a known-unsupported feature is in
        # play (case folding, subrepos, sparse checkouts, exotic matchers).
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        # Bind append methods (or no-ops for unrequested categories) to
        # locals for the hot per-file loop below.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # File exists on disk but is not tracked: it is either
                # ignored or unknown.
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                # tracked but missing on disk
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                # _rangemask is a module-level mask (defined earlier in this
                # file) used because dirstate stores truncated size/mtime.
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1533
1533
1534 def matches(self, match):
1534 def matches(self, match):
1535 """
1535 """
1536 return files in the dirstate (in whatever state) filtered by match
1536 return files in the dirstate (in whatever state) filtered by match
1537 """
1537 """
1538 dmap = self._map
1538 dmap = self._map
1539 if rustmod is not None:
1539 if rustmod is not None:
1540 dmap = self._map._rustmap
1540 dmap = self._map._rustmap
1541
1541
1542 if match.always():
1542 if match.always():
1543 return dmap.keys()
1543 return dmap.keys()
1544 files = match.files()
1544 files = match.files()
1545 if match.isexact():
1545 if match.isexact():
1546 # fast path -- filter the other way around, since typically files is
1546 # fast path -- filter the other way around, since typically files is
1547 # much smaller than dmap
1547 # much smaller than dmap
1548 return [f for f in files if f in dmap]
1548 return [f for f in files if f in dmap]
1549 if match.prefix() and all(fn in dmap for fn in files):
1549 if match.prefix() and all(fn in dmap for fn in files):
1550 # fast path -- all the values are known to be files, so just return
1550 # fast path -- all the values are known to be files, so just return
1551 # that
1551 # that
1552 return list(files)
1552 return list(files)
1553 return [f for f in dmap if match(f)]
1553 return [f for f in dmap if match(f)]
1554
1554
1555 def _actualfilename(self, tr):
1555 def _actualfilename(self, tr):
1556 if tr:
1556 if tr:
1557 return self._pendingfilename
1557 return self._pendingfilename
1558 else:
1558 else:
1559 return self._filename
1559 return self._filename
1560
1560
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` may be None; when a transaction is running, the pending
        dirstate file is the one backed up, a file generator is registered
        so pending changes are written out, and the pending file is removed
        on transaction failure.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1599
1599
1600 def restorebackup(self, tr, backupname):
1600 def restorebackup(self, tr, backupname):
1601 '''Restore dirstate by backup file'''
1601 '''Restore dirstate by backup file'''
1602 # this "invalidate()" prevents "wlock.release()" from writing
1602 # this "invalidate()" prevents "wlock.release()" from writing
1603 # changes of dirstate out after restoring from backup file
1603 # changes of dirstate out after restoring from backup file
1604 self.invalidate()
1604 self.invalidate()
1605 filename = self._actualfilename(tr)
1605 filename = self._actualfilename(tr)
1606 o = self._opener
1606 o = self._opener
1607 if util.samefile(o.join(backupname), o.join(filename)):
1607 if util.samefile(o.join(backupname), o.join(filename)):
1608 o.unlink(backupname)
1608 o.unlink(backupname)
1609 else:
1609 else:
1610 o.rename(backupname, filename, checkambig=True)
1610 o.rename(backupname, filename, checkambig=True)
1611
1611
    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        # ``tr`` is unused here; it is kept for interface symmetry with
        # savebackup/restorebackup.
        self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now