##// END OF EJS Templates
dirstate: replace the use of `_normallookup` in `rebuild`...
marmoute -
r48806:14fa2e58 default
parent child Browse files
Show More
@@ -1,1612 +1,1622 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname through the dirstate's opener, which is rooted
        # at the repository's .hg/ directory
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator restricting *func* to inside a `parentchange` context.

    Raises error.ProgrammingError when the wrapped method is invoked
    while no dirstate parent change is pending
    (see `dirstate.parentchange`).
    """
    import functools

    # functools.wraps preserves __name__/__doc__ on the wrapper so that
    # introspection and the error messages of other decorators keep
    # pointing at the real method instead of `wrap`.
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator restricting *func* to outside a `parentchange` context.

    Raises error.ProgrammingError when the wrapped method is invoked
    while a dirstate parent change is pending
    (see `dirstate.parentchange`).
    """
    import functools

    # functools.wraps preserves __name__/__doc__ on the wrapper so that
    # introspection and debugging output keep pointing at the real method.
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
def __init__(
    self,
    opener,
    ui,
    root,
    validate,
    sparsematchfn,
    nodeconstants,
    use_dirstate_v2,
):
    """Create a new dirstate object.

    opener is an open()-like callable that can be used to open the
    dirstate file; root is the root of the directory tracked by
    the dirstate.
    """
    self._use_dirstate_v2 = use_dirstate_v2
    self._nodeconstants = nodeconstants
    self._opener = opener
    self._validate = validate
    self._root = root
    self._sparsematchfn = sparsematchfn
    # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
    # UNC path pointing to root share (issue4557)
    self._rootdir = pathutil.normasprefix(root)
    # True once the in-memory state may differ from what is on disk
    self._dirty = False
    # mtime of the most recently clean-marked file, consulted by status()
    self._lastnormaltime = 0
    self._ui = ui
    self._filecache = {}
    # number of currently open parentchange() contexts (nesting depth)
    self._parentwriters = 0
    self._filename = b'dirstate'
    self._pendingfilename = b'%s.pending' % self._filename
    self._plchangecallbacks = {}
    # parents as of the first setparents() call, None until then
    self._origpl = None
    # filenames whose entries changed since the last write
    self._updatedfiles = set()
    self._mapcls = dirstatemap.dirstatemap
    # Access and cache cwd early, so we don't access it for the first time
    # after a working-copy update caused it to not exist (accessing it then
    # raises an exception).
    self._cwd
139
139
def prefetch_parents(self):
    """make sure the parents are loaded

    Used to avoid a race condition.
    """
    # merely touching the property forces the dirstate map (and thus
    # the parents) to be read now rather than lazily later
    self._pl
146
146
@contextlib.contextmanager
def parentchange(self):
    """Context manager for handling dirstate parents.

    If an exception occurs in the scope of the context manager,
    the incoherent dirstate won't be written when wlock is
    released.
    """
    # a counter rather than a flag, so nested parentchange() contexts work
    self._parentwriters += 1
    yield
    # Typically we want the "undo" step of a context manager in a
    # finally block so it happens even when an exception
    # occurs. In this case, however, we only want to decrement
    # parentwriters if the code in the with statement exits
    # normally, so we don't have a try/finally here on purpose.
    self._parentwriters -= 1
163
163
def pendingparentchange(self):
    """Returns true if the dirstate is in the middle of a set of changes
    that modify the dirstate parent.
    """
    writers = self._parentwriters
    return writers > 0
169
169
@propertycache
def _map(self):
    """Return the dirstate contents (see documentation for dirstatemap)."""
    # Assigning the instance attribute replaces this propertycache slot,
    # so later reads are plain attribute lookups; the assignment happens
    # before the return so the constructed map is in place immediately.
    self._map = self._mapcls(
        self._ui,
        self._opener,
        self._root,
        self._nodeconstants,
        self._use_dirstate_v2,
    )
    return self._map
181
181
@property
def _sparsematcher(self):
    """The matcher for the sparse checkout.

    The working directory may not include every file from a manifest. The
    matcher obtained by this property will match a path if it is to be
    included in the working directory.

    Delegates to the sparsematchfn callable supplied at construction.
    """
    # TODO there is potential to cache this property. For now, the matcher
    # is resolved on every access. (But the called function does use a
    # cache to keep the lookup fast.)
    return self._sparsematchfn()
194
194
@repocache(b'branch')
def _branch(self):
    """Raw branch name read from .hg/branch (b"default" when unset)."""
    try:
        return self._opener.read(b"branch").strip() or b"default"
    except IOError as inst:
        # a missing file simply means the default branch; anything else
        # is a real error
        if inst.errno != errno.ENOENT:
            raise
        return b"default"
203
203
@property
def _pl(self):
    """Pair of working-directory parent nodes, straight from the map."""
    return self._map.parents()
207
207
def hasdir(self, d):
    """Return whether directory *d* contains at least one tracked file."""
    return self._map.hastrackeddir(d)
210
210
@rootcache(b'.hgignore')
def _ignore(self):
    """Matcher for ignored files, built from all configured ignore files."""
    files = self._ignorefiles()
    if not files:
        # no ignore files anywhere: nothing is ever ignored
        return matchmod.never()

    # combine every ignore file via include: patterns into one matcher
    pats = [b'include:%s' % f for f in files]
    return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
@propertycache
def _slash(self):
    # True when paths should be displayed with '/' even though the OS
    # separator differs (ui.slash configuration, e.g. on Windows)
    return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
@propertycache
def _checklink(self):
    # whether the filesystem at the repository root supports symlinks
    return util.checklink(self._root)
227
227
@propertycache
def _checkexec(self):
    # whether the filesystem at the repository root honors the exec bit
    return bool(util.checkexec(self._root))
231
231
@propertycache
def _checkcase(self):
    # True on case-insensitive filesystems; .hg always exists, so it is
    # a reliable path to probe with
    return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
def flagfunc(self, buildfallback):
    """Return a callable mapping a tracked path to its flags (b'l', b'x'
    or b'').

    buildfallback() is only invoked when the filesystem cannot answer
    for symlinks and/or exec bits itself; the returned fallback supplies
    the missing flag(s).
    """
    if self._checklink and self._checkexec:
        # the filesystem supports both symlinks and the exec bit:
        # answer entirely from a single lstat of the working-copy file
        def f(x):
            try:
                st = os.lstat(self._join(x))
                if util.statislink(st):
                    return b'l'
                if util.statisexec(st):
                    return b'x'
            except OSError:
                # missing/unreadable file: report no flags
                pass
            return b''

        return f

    fallback = buildfallback()
    if self._checklink:
        # symlinks are real, but the exec bit is unreliable: take 'x'
        # from the fallback instead of the filesystem
        def f(x):
            if os.path.islink(self._join(x)):
                return b'l'
            if b'x' in fallback(x):
                return b'x'
            return b''

        return f
    if self._checkexec:
        # the exec bit is real, but symlinks are not: take 'l' from the
        # fallback and probe the exec bit on disk
        def f(x):
            if b'l' in fallback(x):
                return b'l'
            if util.isexec(self._join(x)):
                return b'x'
            return b''

        return f
    else:
        # neither is supported: delegate entirely to the fallback
        return fallback
280
280
@propertycache
def _cwd(self):
    """Current working directory, honoring the ui.forcecwd override."""
    # internal config: ui.forcecwd
    forcecwd = self._ui.config(b'ui', b'forcecwd')
    if forcecwd:
        return forcecwd
    return encoding.getcwd()
288
288
def getcwd(self):
    """Return the path from which a canonical path is calculated.

    This path should be used to resolve file patterns or to convert
    canonical paths back to file paths for display. It shouldn't be
    used to get real file paths. Use vfs functions instead.
    """
    cwd = self._cwd
    if cwd == self._root:
        return b''
    # self._root ends with a path separator if self._root is '/' or 'C:\'
    rootsep = self._root
    if not util.endswithsep(rootsep):
        rootsep += pycompat.ossep
    if not cwd.startswith(rootsep):
        # we're outside the repo. return an absolute path.
        return cwd
    return cwd[len(rootsep) :]
308
308
def pathto(self, f, cwd=None):
    """Return repo-root-relative path *f* expressed relative to *cwd*
    (defaulting to getcwd()) for display purposes."""
    if cwd is None:
        cwd = self.getcwd()
    path = util.pathto(self._root, cwd, f)
    if self._slash:
        # honor ui.slash: always display forward slashes
        return util.pconvert(path)
    return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
def items(self):
    """Iterate over (filename, entry) pairs in unspecified order."""
    return pycompat.iteritems(self._map)

# alias kept for callers that still use the historical name
iteritems = items
346
346
def directories(self):
    """Expose the underlying map's directories() listing."""
    return self._map.directories()
349
349
def parents(self):
    """Return the two working-directory parents, each run through the
    validate callable supplied at construction."""
    validate = self._validate
    return [validate(node) for node in self._pl]
352
352
def p1(self):
    """Return the validated first working-directory parent."""
    first = self._pl[0]
    return self._validate(first)
355
355
def p2(self):
    """Return the validated second working-directory parent."""
    second = self._pl[1]
    return self._validate(second)
358
358
@property
def in_merge(self):
    """True if a merge is in progress"""
    second_parent = self._pl[1]
    return second_parent != self._nodeconstants.nullid
363
363
def branch(self):
    """Current branch name, converted to the local encoding for display."""
    return encoding.tolocal(self._branch)
366
366
def setparents(self, p1, p2=None):
    """Set dirstate parents to p1 and p2.

    When moving from two parents to one, "merged" entries are adjusted
    to normal, and previous copy records are discarded and returned by
    the call.

    See localrepo.setparents()
    """
    if p2 is None:
        p2 = self._nodeconstants.nullid
    if self._parentwriters == 0:
        raise ValueError(
            b"cannot set dirstate parent outside of "
            b"dirstate.parentchange context manager"
        )

    self._dirty = True
    oldp2 = self._pl[1]
    if self._origpl is None:
        # remember the pre-change parents (first change only)
        self._origpl = self._pl
    self._map.setparents(p1, p2)
    copies = {}
    nullid = self._nodeconstants.nullid
    if oldp2 != nullid and p2 == nullid:
        # leaving a merge: entries that referenced the second parent
        # must be rewritten in terms of p1 only
        candidatefiles = self._map.non_normal_or_other_parent_paths()

        for f in candidatefiles:
            s = self._map.get(f)
            if s is None:
                continue

            # Discard "merged" markers when moving away from a merge state
            if s.merged:
                source = self._map.copymap.get(f)
                if source:
                    copies[f] = source
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                    possibly_dirty=True,
                )
            # Also fix up otherparent markers
            elif s.from_p2:
                source = self._map.copymap.get(f)
                if source:
                    copies[f] = source
                self._check_new_tracked_filename(f)
                self._updatedfiles.add(f)
                self._map.reset_state(
                    f,
                    p1_tracked=False,
                    wc_tracked=True,
                )
    return copies
423
423
def setbranch(self, branch):
    """Persist *branch* (given in local encoding) to .hg/branch."""
    self.__class__._branch.set(self, encoding.fromlocal(branch))
    f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
    try:
        f.write(self._branch + b'\n')
        f.close()

        # make sure filecache has the correct stat info for _branch after
        # replacing the underlying file
        ce = self._filecache[b'_branch']
        if ce:
            ce.refresh()
    except:  # re-raises
        # abandon the atomictemp file so a partial write never replaces
        # the original, then propagate the error
        f.discard()
        raise
439
439
def invalidate(self):
    """Causes the next access to reread the dirstate.

    This is different from localrepo.invalidatedirstate() because it always
    rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
    check whether the dirstate has changed before rereading it."""

    # drop the cached properties so they are recomputed on next access
    for a in ("_map", "_branch", "_ignore"):
        if a in self.__dict__:
            delattr(self, a)
    # reset all in-memory bookkeeping to a pristine, unmodified state
    self._lastnormaltime = 0
    self._dirty = False
    self._updatedfiles.clear()
    self._parentwriters = 0
    self._origpl = None
455
455
def copy(self, source, dest):
    """Mark dest as a copy of source. Unmark dest if source is None."""
    if source == dest:
        return
    self._dirty = True
    if source is None:
        # unmark: only record an update if there was a copy to remove
        if self._map.copymap.pop(dest, None):
            self._updatedfiles.add(dest)
    else:
        self._map.copymap[dest] = source
        self._updatedfiles.add(source)
        self._updatedfiles.add(dest)
467
467
def copied(self, file):
    """Return the copy source recorded for *file*, or None."""
    copymap = self._map.copymap
    return copymap.get(file)
470
470
def copies(self):
    """Return the live mapping of destination -> copy source."""
    return self._map.copymap
473
473
@requires_no_parents_change
def set_tracked(self, filename):
    """a "public" method for generic code to mark a file as tracked

    This function is to be called outside of "update/merge" case. For
    example by a command like `hg add X`.

    return True the file was previously untracked, False otherwise.
    """
    self._dirty = True
    self._updatedfiles.add(filename)
    entry = self._map.get(filename)
    if entry is None or not entry.tracked:
        # newly tracked: validate the name against existing entries
        self._check_new_tracked_filename(filename)
    return self._map.set_tracked(filename)
489
489
@requires_no_parents_change
def set_untracked(self, filename):
    """a "public" method for generic code to mark a file as untracked

    This function is to be called outside of "update/merge" case. For
    example by a command like `hg remove X`.

    return True the file was previously tracked, False otherwise.
    """
    ret = self._map.set_untracked(filename)
    if ret:
        # only dirty the dirstate when something actually changed
        self._dirty = True
        self._updatedfiles.add(filename)
    return ret
504
504
@requires_no_parents_change
def set_clean(self, filename, parentfiledata=None):
    """record that the current state of the file on disk is known to be clean"""
    self._dirty = True
    self._updatedfiles.add(filename)
    if parentfiledata:
        # the caller already stat()ed the file: trust its data
        (mode, size, mtime) = parentfiledata
    else:
        (mode, size, mtime) = self._get_filedata(filename)
    if not self._map[filename].tracked:
        self._check_new_tracked_filename(filename)
    self._map.set_clean(filename, mode, size, mtime)
    if mtime > self._lastnormaltime:
        # Remember the most recent modification timeslot for status(),
        # to make sure we won't miss future size-preserving file content
        # modifications that happen within the same timeslot.
        self._lastnormaltime = mtime
522
522
@requires_no_parents_change
def set_possibly_dirty(self, filename):
    """record that the current state of the file on disk is unknown"""
    self._dirty = True
    self._updatedfiles.add(filename)
    self._map.set_possibly_dirty(filename)
529
529
@requires_parents_change
def update_file_p1(
    self,
    filename,
    p1_tracked,
):
    """Set a file as tracked in the parent (or not)

    This is to be called when adjust the dirstate to a new parent after an history
    rewriting operation.

    It should not be called during a merge (p2 != nullid) and only within
    a `with dirstate.parentchange():` context.
    """
    if self.in_merge:
        msg = b'update_file_reference should not be called when merging'
        raise error.ProgrammingError(msg)
    entry = self._map.get(filename)
    if entry is None:
        wc_tracked = False
    else:
        wc_tracked = entry.tracked
    possibly_dirty = False
    if p1_tracked and wc_tracked:
        # the underlying reference might have changed, we will have to
        # check it.
        possibly_dirty = True
    elif not (p1_tracked or wc_tracked):
        # the file is no longer relevant to anyone
        if self._map.dropfile(filename):
            self._dirty = True
            self._updatedfiles.add(filename)
    elif (not p1_tracked) and wc_tracked:
        if entry is not None and entry.added:
            return  # avoid dropping copy information (maybe?)
    elif p1_tracked and not wc_tracked:
        # tracked in the parent only: nothing to adjust here, the
        # reset_state call below records the new situation
        pass
    else:
        assert False, 'unreachable'

    # this mean we are doing call for file we do not really care about the
    # data (eg: added or removed), however this should be a minor overhead
    # compared to the overall update process calling this.
    parentfiledata = None
    if wc_tracked:
        parentfiledata = self._get_filedata(filename)

    self._updatedfiles.add(filename)
    self._map.reset_state(
        filename,
        wc_tracked,
        p1_tracked,
        possibly_dirty=possibly_dirty,
        parentfiledata=parentfiledata,
    )
    if (
        parentfiledata is not None
        and parentfiledata[2] > self._lastnormaltime
    ):
        # Remember the most recent modification timeslot for status(),
        # to make sure we won't miss future size-preserving file content
        # modifications that happen within the same timeslot.
        self._lastnormaltime = parentfiledata[2]
593
593
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the direstates parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.

        :filename: path of the file (relative to the repository root)
        :wc_tracked: whether the working copy tracks the file
        :p1_tracked: whether the first parent tracks the file
        :p2_tracked: whether the second parent tracks the file
        :merged: the file was merged (incompatible with clean_p1/clean_p2)
        :clean_p1 / clean_p2: the file is clean relative to that parent
        :possibly_dirty: the on-disk content may differ from the recorded one
        :parentfiledata: optional pre-computed ``(mode, size, mtime)`` triple;
            stat'ed lazily from disk when needed and not provided
        """
        # `merged` asserts a three-way state; claiming it is also clean
        # against a single parent is contradictory.
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        self._updatedfiles.add(filename)

        # Only a file that is tracked on both sides, and not already flagged
        # as dirty/merged/clean-p2, needs fresh stat data from disk.
        need_parent_file_data = (
            not (possibly_dirty or clean_p2 or merged)
            and wc_tracked
            and p1_tracked
        )

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        if need_parent_file_data:
            if parentfiledata is None:
                parentfiledata = self._get_filedata(filename)
            mtime = parentfiledata[2]

            if mtime > self._lastnormaltime:
                # Remember the most recent modification timeslot for
                # status(), to make sure we won't miss future
                # size-preserving file content modifications that happen
                # within the same timeslot.
                self._lastnormaltime = mtime

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_tracked=p2_tracked,
            merged=merged,
            clean_p1=clean_p1,
            clean_p2=clean_p2,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        # Also account for caller-provided stat data that skipped the
        # need_parent_file_data branch above.
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
669
669
670 def _addpath(
670 def _addpath(
671 self,
671 self,
672 f,
672 f,
673 mode=0,
673 mode=0,
674 size=None,
674 size=None,
675 mtime=None,
675 mtime=None,
676 added=False,
676 added=False,
677 merged=False,
677 merged=False,
678 from_p2=False,
678 from_p2=False,
679 possibly_dirty=False,
679 possibly_dirty=False,
680 ):
680 ):
681 entry = self._map.get(f)
681 entry = self._map.get(f)
682 if added or entry is not None and not entry.tracked:
682 if added or entry is not None and not entry.tracked:
683 self._check_new_tracked_filename(f)
683 self._check_new_tracked_filename(f)
684 self._dirty = True
684 self._dirty = True
685 self._updatedfiles.add(f)
685 self._updatedfiles.add(f)
686 self._map.addfile(
686 self._map.addfile(
687 f,
687 f,
688 mode=mode,
688 mode=mode,
689 size=size,
689 size=size,
690 mtime=mtime,
690 mtime=mtime,
691 added=added,
691 added=added,
692 merged=merged,
692 merged=merged,
693 from_p2=from_p2,
693 from_p2=from_p2,
694 possibly_dirty=possibly_dirty,
694 possibly_dirty=possibly_dirty,
695 )
695 )
696
696
697 def _check_new_tracked_filename(self, filename):
697 def _check_new_tracked_filename(self, filename):
698 scmutil.checkfilename(filename)
698 scmutil.checkfilename(filename)
699 if self._map.hastrackeddir(filename):
699 if self._map.hastrackeddir(filename):
700 msg = _(b'directory %r already in dirstate')
700 msg = _(b'directory %r already in dirstate')
701 msg %= pycompat.bytestr(filename)
701 msg %= pycompat.bytestr(filename)
702 raise error.Abort(msg)
702 raise error.Abort(msg)
703 # shadows
703 # shadows
704 for d in pathutil.finddirs(filename):
704 for d in pathutil.finddirs(filename):
705 if self._map.hastrackeddir(d):
705 if self._map.hastrackeddir(d):
706 break
706 break
707 entry = self._map.get(d)
707 entry = self._map.get(d)
708 if entry is not None and not entry.removed:
708 if entry is not None and not entry.removed:
709 msg = _(b'file %r in dirstate clashes with %r')
709 msg = _(b'file %r in dirstate clashes with %r')
710 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
710 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
711 raise error.Abort(msg)
711 raise error.Abort(msg)
712
712
713 def _get_filedata(self, filename):
713 def _get_filedata(self, filename):
714 """returns"""
714 """returns"""
715 s = os.lstat(self._join(filename))
715 s = os.lstat(self._join(filename))
716 mode = s.st_mode
716 mode = s.st_mode
717 size = s.st_size
717 size = s.st_size
718 mtime = s[stat.ST_MTIME]
718 mtime = s[stat.ST_MTIME]
719 return (mode, size, mtime)
719 return (mode, size, mtime)
720
720
    def _normallookup(self, f):
        '''Mark a file normal, but possibly dirty.

        During a merge, a file previously recorded as "merged-then-removed"
        or "from-p2-then-removed" has its pre-removal state (and copy
        source) restored instead of being marked plain-normal.
        '''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with a a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    # grab the copy source before the entry is rewritten,
                    # then re-record it afterwards
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self._addpath(f, merged=True)
                    else:
                        self._addpath(f, from_p2=True)
                    self._map.copymap.pop(f, None)
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in the desired merge state; nothing to do
                    return
        # default path: record as tracked but possibly dirty, and drop any
        # stale copy information
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
745
745
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Determine the case-folded form of ``path`` by probing the disk.

        ``normed`` is the case-normalized key; ``storemap`` is the fold-map
        cache (file or dir map) to record the discovery in. ``exists`` may
        be passed to skip the lexists() check. Returns the folded path.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # cache only results confirmed against the filesystem
            # NOTE(review): reconstructed placement — confirm the store is
            # inside the existing-path branch as in upstream
            storemap[normed] = folded

        return folded
771
771
772 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
772 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
773 normed = util.normcase(path)
773 normed = util.normcase(path)
774 folded = self._map.filefoldmap.get(normed, None)
774 folded = self._map.filefoldmap.get(normed, None)
775 if folded is None:
775 if folded is None:
776 if isknown:
776 if isknown:
777 folded = path
777 folded = path
778 else:
778 else:
779 folded = self._discoverpath(
779 folded = self._discoverpath(
780 path, normed, ignoremissing, exists, self._map.filefoldmap
780 path, normed, ignoremissing, exists, self._map.filefoldmap
781 )
781 )
782 return folded
782 return folded
783
783
784 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
784 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
785 normed = util.normcase(path)
785 normed = util.normcase(path)
786 folded = self._map.filefoldmap.get(normed, None)
786 folded = self._map.filefoldmap.get(normed, None)
787 if folded is None:
787 if folded is None:
788 folded = self._map.dirfoldmap.get(normed, None)
788 folded = self._map.dirfoldmap.get(normed, None)
789 if folded is None:
789 if folded is None:
790 if isknown:
790 if isknown:
791 folded = path
791 folded = path
792 else:
792 else:
793 # store discovered result in dirfoldmap so that future
793 # store discovered result in dirfoldmap so that future
794 # normalizefile calls don't start matching directories
794 # normalizefile calls don't start matching directories
795 folded = self._discoverpath(
795 folded = self._discoverpath(
796 path, normed, ignoremissing, exists, self._map.dirfoldmap
796 path, normed, ignoremissing, exists, self._map.dirfoldmap
797 )
797 )
798 return folded
798 return folded
799
799
800 def normalize(self, path, isknown=False, ignoremissing=False):
800 def normalize(self, path, isknown=False, ignoremissing=False):
801 """
801 """
802 normalize the case of a pathname when on a casefolding filesystem
802 normalize the case of a pathname when on a casefolding filesystem
803
803
804 isknown specifies whether the filename came from walking the
804 isknown specifies whether the filename came from walking the
805 disk, to avoid extra filesystem access.
805 disk, to avoid extra filesystem access.
806
806
807 If ignoremissing is True, missing path are returned
807 If ignoremissing is True, missing path are returned
808 unchanged. Otherwise, we try harder to normalize possibly
808 unchanged. Otherwise, we try harder to normalize possibly
809 existing path components.
809 existing path components.
810
810
811 The normalized case is determined based on the following precedence:
811 The normalized case is determined based on the following precedence:
812
812
813 - version of name already stored in the dirstate
813 - version of name already stored in the dirstate
814 - version of name stored on disk
814 - version of name stored on disk
815 - version provided via command arguments
815 - version provided via command arguments
816 """
816 """
817
817
818 if self._checkcase:
818 if self._checkcase:
819 return self._normalize(path, isknown, ignoremissing)
819 return self._normalize(path, isknown, ignoremissing)
820 return path
820 return path
821
821
822 def clear(self):
822 def clear(self):
823 self._map.clear()
823 self._map.clear()
824 self._lastnormaltime = 0
824 self._lastnormaltime = 0
825 self._updatedfiles.clear()
825 self._updatedfiles.clear()
826 self._dirty = True
826 self._dirty = True
827
827
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to describe ``parent`` as the sole parent.

        ``allfiles`` is the file listing of ``parent``. When
        ``changedfiles`` is given, only those files are touched: entries
        also present in ``allfiles`` are re-looked-up, the rest dropped.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the rebuild
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            # remember the pre-rebuild parents for parent-change callbacks
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            if self.in_merge:
                # during a merge, go through the higher-level API so
                # merge-specific state is restored
                self.set_tracked(f)
            else:
                # mark tracked in both wc and p1, content to be re-checked
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                    possibly_dirty=True,
                )
            self._updatedfiles.add(f)
        for f in to_drop:
            if self._map.dropfile(f):
                self._updatedfiles.add(f)

        self._dirty = True
862
872
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        # delegated to the map, which tracks the on-disk file identity
        return self._map.identity
870
880
    def write(self, tr):
        """Write the dirstate to disk, immediately or via transaction ``tr``.

        With a transaction, writing is delayed through a file generator and
        ambiguous timestamps are dropped now; without one, the file is
        written atomically right away. No-op when nothing is dirty.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )
            return

        # no transaction: write atomically, guarding against timestamp races
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
902
912
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # last registration per category wins
        self._plchangecallbacks[category] = callback
913
923
    def _writedirstate(self, tr, st):
        """Serialize the dirstate map into open file object ``st``.

        Fires parent-change callbacks when the parents moved since the last
        write, and optionally stalls (``debug.dirstate.delaywrite``) so no
        entry shares its mtime with 'now'.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            # sorted for deterministic callback order across runs
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = 0
        self._dirty = False
947
957
948 def _dirignore(self, f):
958 def _dirignore(self, f):
949 if self._ignore(f):
959 if self._ignore(f):
950 return True
960 return True
951 for p in pathutil.finddirs(f):
961 for p in pathutil.finddirs(f):
952 if self._ignore(p):
962 if self._ignore(p):
953 return True
963 return True
954 return False
964 return False
955
965
956 def _ignorefiles(self):
966 def _ignorefiles(self):
957 files = []
967 files = []
958 if os.path.exists(self._join(b'.hgignore')):
968 if os.path.exists(self._join(b'.hgignore')):
959 files.append(self._join(b'.hgignore'))
969 files.append(self._join(b'.hgignore'))
960 for name, path in self._ui.configitems(b"ui"):
970 for name, path in self._ui.configitems(b"ui"):
961 if name == b'ignore' or name.startswith(b'ignore.'):
971 if name == b'ignore' or name.startswith(b'ignore.'):
962 # we need to use os.path.join here rather than self._join
972 # we need to use os.path.join here rather than self._join
963 # because path is arbitrary and user-specified
973 # because path is arbitrary and user-specified
964 files.append(os.path.join(self._rootdir, util.expandpath(path)))
974 files.append(os.path.join(self._rootdir, util.expandpath(path)))
965 return files
975 return files
966
976
967 def _ignorefileandline(self, f):
977 def _ignorefileandline(self, f):
968 files = collections.deque(self._ignorefiles())
978 files = collections.deque(self._ignorefiles())
969 visited = set()
979 visited = set()
970 while files:
980 while files:
971 i = files.popleft()
981 i = files.popleft()
972 patterns = matchmod.readpatternfile(
982 patterns = matchmod.readpatternfile(
973 i, self._ui.warn, sourceinfo=True
983 i, self._ui.warn, sourceinfo=True
974 )
984 )
975 for pattern, lineno, line in patterns:
985 for pattern, lineno, line in patterns:
976 kind, p = matchmod._patsplit(pattern, b'glob')
986 kind, p = matchmod._patsplit(pattern, b'glob')
977 if kind == b"subinclude":
987 if kind == b"subinclude":
978 if p not in visited:
988 if p not in visited:
979 files.append(p)
989 files.append(p)
980 continue
990 continue
981 m = matchmod.match(
991 m = matchmod.match(
982 self._root, b'', [], [pattern], warn=self._ui.warn
992 self._root, b'', [], [pattern], warn=self._ui.warn
983 )
993 )
984 if m(f):
994 if m(f):
985 return (i, lineno, line)
995 return (i, lineno, line)
986 visited.add(i)
996 visited.add(i)
987 return (None, -1, b"")
997 return (None, -1, b"")
988
998
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for an unsupported file type
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently used attributes/functions to locals for the loop
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop paths that live inside a subrepo; both lists are sorted so a
        # single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # sentinels: never walk into subrepos or the .hg directory
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group the found files by their case-normalized form
            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            # for colliding groups, keep only the spelling that matches disk
            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1123
1133
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        The walk proceeds in three steps: explicitly-matched files first,
        then a recursive directory traversal, and finally any dirstate
        entries not yet accounted for (missing/ignored/under symlinks).
        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Select ignore predicates: listing ignored files disables
        # ignoring entirely; listing neither unknown nor ignored lets us
        # ignore everything (and skip directory recursion in step 2).
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Bind frequently-used attributes to locals for speed in the
        # traversal loop below.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # Depth-first traversal over the pending directory stack.
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1311
1321
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status via the Rust extension and return (lookup, status).

        ``lookup`` is the list of files whose state could not be decided
        from metadata alone; ``status`` is a ``scmutil.status`` instance.
        May raise ``rustmod.FallbackError`` (handled by the caller) when
        the Rust side cannot service the request.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        # The result tuple layout must match what rustmod.status produces;
        # keep the unpacking order in sync with the Rust side.
        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # The Rust side may have mutated the dirstate map; propagate that.
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax) pair: invalid ignore-pattern syntax
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: the pattern file itself was unreadable
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1390
1400
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        # From here on, `unknown`/`ignored`/`clean` are reused as result
        # accumulators; the original flags live in the list* variables.
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        # Fall back to the pure-Python path whenever the Rust extension is
        # unavailable or the request involves a feature it does not handle.
        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            # placeholder "append" used when a category isn't requested
            pass

        # Bind list appenders and hot attributes to locals for speed.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file on disk but not tracked: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                # tracked but absent from disk
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1531
1541
1532 def matches(self, match):
1542 def matches(self, match):
1533 """
1543 """
1534 return files in the dirstate (in whatever state) filtered by match
1544 return files in the dirstate (in whatever state) filtered by match
1535 """
1545 """
1536 dmap = self._map
1546 dmap = self._map
1537 if rustmod is not None:
1547 if rustmod is not None:
1538 dmap = self._map._rustmap
1548 dmap = self._map._rustmap
1539
1549
1540 if match.always():
1550 if match.always():
1541 return dmap.keys()
1551 return dmap.keys()
1542 files = match.files()
1552 files = match.files()
1543 if match.isexact():
1553 if match.isexact():
1544 # fast path -- filter the other way around, since typically files is
1554 # fast path -- filter the other way around, since typically files is
1545 # much smaller than dmap
1555 # much smaller than dmap
1546 return [f for f in files if f in dmap]
1556 return [f for f in files if f in dmap]
1547 if match.prefix() and all(fn in dmap for fn in files):
1557 if match.prefix() and all(fn in dmap for fn in files):
1548 # fast path -- all the values are known to be files, so just return
1558 # fast path -- all the values are known to be files, so just return
1549 # that
1559 # that
1550 return list(files)
1560 return list(files)
1551 return [f for f in dmap if match(f)]
1561 return [f for f in dmap if match(f)]
1552
1562
1553 def _actualfilename(self, tr):
1563 def _actualfilename(self, tr):
1554 if tr:
1564 if tr:
1555 return self._pendingfilename
1565 return self._pendingfilename
1556 else:
1566 else:
1557 return self._filename
1567 return self._filename
1558
1568
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` is the active transaction (or None); ``backupname`` is the
        vfs-relative name of the backup file to create. The backup is a
        hardlink of the (possibly freshly written) dirstate file.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1597
1607
1598 def restorebackup(self, tr, backupname):
1608 def restorebackup(self, tr, backupname):
1599 '''Restore dirstate by backup file'''
1609 '''Restore dirstate by backup file'''
1600 # this "invalidate()" prevents "wlock.release()" from writing
1610 # this "invalidate()" prevents "wlock.release()" from writing
1601 # changes of dirstate out after restoring from backup file
1611 # changes of dirstate out after restoring from backup file
1602 self.invalidate()
1612 self.invalidate()
1603 filename = self._actualfilename(tr)
1613 filename = self._actualfilename(tr)
1604 o = self._opener
1614 o = self._opener
1605 if util.samefile(o.join(backupname), o.join(filename)):
1615 if util.samefile(o.join(backupname), o.join(filename)):
1606 o.unlink(backupname)
1616 o.unlink(backupname)
1607 else:
1617 else:
1608 o.rename(backupname, filename, checkambig=True)
1618 o.rename(backupname, filename, checkambig=True)
1609
1619
    def clearbackup(self, tr, backupname):
        '''Clear backup file

        ``tr`` is accepted for interface symmetry with ``savebackup`` and
        ``restorebackup`` but is not used here.
        '''
        self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now