##// END OF EJS Templates
dirstate: replace the use of _normallookup in `setparents`...
marmoute -
r48805:625b84c1 default
parent child Browse files
Show More
@@ -1,1607 +1,1612 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
# Resolve the accelerated implementations; ``policy`` decides at runtime
# whether the pure-Python, C, or Rust version of each module is used.
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# dirstate-v2 is only implemented by the Rust extension.
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# Local aliases for frequently used helpers.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # Cached entries are resolved relative to the repository's
        # .hg/ opener rather than the working-directory root.
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # Cached entries are resolved relative to the working-directory root.
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator: only allow calling ``func`` inside a parentchange context.

    Raises ``error.ProgrammingError`` when the wrapped method is invoked
    while no parent change is pending on the dirstate.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator: only allow calling ``func`` outside a parentchange context.

    Raises ``error.ProgrammingError`` when the wrapped method is invoked
    while a parent change is pending on the dirstate.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state diverges from the on-disk file.
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # Count of currently-open parentchange() contexts.
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139
139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # Touching self._pl forces the dirstate map (and thus the parents)
        # to be read now rather than lazily later.
        self._pl
146
146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163
163
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        # Non-zero while at least one parentchange() context is open.
        return self._parentwriters > 0
169
169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # Assigning to self._map replaces this propertycache slot, so the
        # map is only constructed once per dirstate instance.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181
181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194
194
    @repocache(b'branch')
    def _branch(self):
        """Current branch name as bytes; b"default" when unset."""
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            # A missing .hg/branch file simply means the default branch.
            if inst.errno != errno.ENOENT:
                raise
            return b"default"
203
203
    @property
    def _pl(self):
        # The (p1, p2) pair of working-directory parents, as stored in the map.
        return self._map.parents()
207
207
    def hasdir(self, d):
        """Delegate to the dirstate map's tracked-directory check for `d`."""
        return self._map.hastrackeddir(d)
210
210
    @rootcache(b'.hgignore')
    def _ignore(self):
        """Matcher combining all ignore files; never-matching when none exist."""
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        # Each ignore file becomes an 'include:' pattern of the combined match.
        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
    @propertycache
    def _slash(self):
        # True when paths should be shown with '/' even though the native
        # separator differs (ui.slash configuration).
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
    @propertycache
    def _checklink(self):
        # Whether the filesystem at the repository root supports symlinks.
        return util.checklink(self._root)
227
227
    @propertycache
    def _checkexec(self):
        # Whether the filesystem at the repository root honors the exec bit.
        return bool(util.checkexec(self._root))
231
231
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed via the .hg directory).
        return not util.fscasesensitive(self._join(b'.hg'))
235
235
    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240
240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a path to its flags (b'l', b'x' or b'').

        When the filesystem cannot express symlinks and/or the exec bit,
        ``buildfallback()`` supplies the missing information.
        """
        if self._checklink and self._checkexec:
            # Filesystem supports both symlinks and the exec bit: a single
            # lstat answers everything.
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # Symlinks are real; exec bit comes from the fallback.
            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # Exec bit is real; symlink flag comes from the fallback.
            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # Neither is supported: everything comes from the fallback.
            return fallback
280
280
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition
        ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. we should
        consider migrating all user of this to going through the dirstate entry
        instead.
        """
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        # Unknown files report the untracked marker rather than raising.
        return b'?'
335
335
    def __contains__(self, key):
        """True when `key` (a filename) has an entry in the dirstate."""
        return key in self._map
338
338
    def __iter__(self):
        # Iterate filenames in sorted order for deterministic traversal.
        return iter(sorted(self._map))
341
341
    def items(self):
        """Iterate over (filename, entry) pairs of the dirstate map."""
        return pycompat.iteritems(self._map)

    # Python 2 compatibility alias.
    iteritems = items
346
346
    def directories(self):
        # Delegate to the dirstate map's directory listing.
        return self._map.directories()
349
349
    def parents(self):
        """Return both working-directory parents, run through _validate."""
        return [self._validate(p) for p in self._pl]

    def p1(self):
        """Return the validated first working-directory parent."""
        return self._validate(self._pl[0])

    def p2(self):
        """Return the validated second working-directory parent."""
        return self._validate(self._pl[1])
358
358
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # A non-null second parent is exactly what defines a merge state.
        return self._pl[1] != self._nodeconstants.nullid
363
363
    def branch(self):
        """Return the current branch name in local encoding."""
        return encoding.tolocal(self._branch)
366
366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # Remember the pre-change parents for later invalidation/rollback.
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        nullid = self._nodeconstants.nullid
        # Only when dropping from a merge (two parents) back to one parent do
        # merge-related markers need to be cleaned up.
        if oldp2 != nullid and p2 == nullid:
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    # possibly_dirty forces a later status check of the file.
                    self._map.reset_state(
                        f,
                        wc_tracked=True,
                        p1_tracked=True,
                        possibly_dirty=True,
                    )
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._check_new_tracked_filename(f)
                    self._updatedfiles.add(f)
                    self._map.reset_state(
                        f,
                        p1_tracked=False,
                        wc_tracked=True,
                    )
        return copies
418
423
    def setbranch(self, branch):
        """Record `branch` (local encoding) as the current branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        # Write atomically with ambiguity checking so concurrent readers never
        # see a partially-written branch file.
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise
434
439
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # Dropping these propertycache attributes forces them to be rebuilt.
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
450
455
451 def copy(self, source, dest):
456 def copy(self, source, dest):
452 """Mark dest as a copy of source. Unmark dest if source is None."""
457 """Mark dest as a copy of source. Unmark dest if source is None."""
453 if source == dest:
458 if source == dest:
454 return
459 return
455 self._dirty = True
460 self._dirty = True
456 if source is not None:
461 if source is not None:
457 self._map.copymap[dest] = source
462 self._map.copymap[dest] = source
458 self._updatedfiles.add(source)
463 self._updatedfiles.add(source)
459 self._updatedfiles.add(dest)
464 self._updatedfiles.add(dest)
460 elif self._map.copymap.pop(dest, None):
465 elif self._map.copymap.pop(dest, None):
461 self._updatedfiles.add(dest)
466 self._updatedfiles.add(dest)
462
467
    def copied(self, file):
        """Return the copy source of `file`, or None when not copied."""
        return self._map.copymap.get(file, None)
465
470
    def copies(self):
        """Return the whole copy map (destination -> source)."""
        return self._map.copymap
468
473
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True the file was previously untracked, False otherwise.
        """
        self._dirty = True
        self._updatedfiles.add(filename)
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            # Newly tracked file: validate its name before recording it.
            self._check_new_tracked_filename(filename)
        return self._map.set_tracked(filename)
484
489
485 @requires_no_parents_change
490 @requires_no_parents_change
486 def set_untracked(self, filename):
491 def set_untracked(self, filename):
487 """a "public" method for generic code to mark a file as untracked
492 """a "public" method for generic code to mark a file as untracked
488
493
489 This function is to be called outside of "update/merge" case. For
494 This function is to be called outside of "update/merge" case. For
490 example by a command like `hg remove X`.
495 example by a command like `hg remove X`.
491
496
492 return True the file was previously tracked, False otherwise.
497 return True the file was previously tracked, False otherwise.
493 """
498 """
494 ret = self._map.set_untracked(filename)
499 ret = self._map.set_untracked(filename)
495 if ret:
500 if ret:
496 self._dirty = True
501 self._dirty = True
497 self._updatedfiles.add(filename)
502 self._updatedfiles.add(filename)
498 return ret
503 return ret
499
504
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        self._updatedfiles.add(filename)
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # No caller-provided stat data: read it from disk now.
            (mode, size, mtime) = self._get_filedata(filename)
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        self._map.set_clean(filename, mode, size, mtime)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
517
522
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._updatedfiles.add(filename)
        self._map.set_possibly_dirty(filename)
524
529
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.dropfile(filename):
                self._dirty = True
                self._updatedfiles.add(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            pass
        else:
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
588
593
589 @requires_parents_change
594 @requires_parents_change
590 def update_file(
595 def update_file(
591 self,
596 self,
592 filename,
597 filename,
593 wc_tracked,
598 wc_tracked,
594 p1_tracked,
599 p1_tracked,
595 p2_tracked=False,
600 p2_tracked=False,
596 merged=False,
601 merged=False,
597 clean_p1=False,
602 clean_p1=False,
598 clean_p2=False,
603 clean_p2=False,
599 possibly_dirty=False,
604 possibly_dirty=False,
600 parentfiledata=None,
605 parentfiledata=None,
601 ):
606 ):
602 """update the information about a file in the dirstate
607 """update the information about a file in the dirstate
603
608
604 This is to be called when the direstates parent changes to keep track
609 This is to be called when the direstates parent changes to keep track
605 of what is the file situation in regards to the working copy and its parent.
610 of what is the file situation in regards to the working copy and its parent.
606
611
607 This function must be called within a `dirstate.parentchange` context.
612 This function must be called within a `dirstate.parentchange` context.
608
613
609 note: the API is at an early stage and we might need to adjust it
614 note: the API is at an early stage and we might need to adjust it
610 depending of what information ends up being relevant and useful to
615 depending of what information ends up being relevant and useful to
611 other processing.
616 other processing.
612 """
617 """
613 if merged and (clean_p1 or clean_p2):
618 if merged and (clean_p1 or clean_p2):
614 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
619 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
615 raise error.ProgrammingError(msg)
620 raise error.ProgrammingError(msg)
616
621
617 # note: I do not think we need to double check name clash here since we
622 # note: I do not think we need to double check name clash here since we
618 # are in a update/merge case that should already have taken care of
623 # are in a update/merge case that should already have taken care of
619 # this. The test agrees
624 # this. The test agrees
620
625
621 self._dirty = True
626 self._dirty = True
622 self._updatedfiles.add(filename)
627 self._updatedfiles.add(filename)
623
628
624 need_parent_file_data = (
629 need_parent_file_data = (
625 not (possibly_dirty or clean_p2 or merged)
630 not (possibly_dirty or clean_p2 or merged)
626 and wc_tracked
631 and wc_tracked
627 and p1_tracked
632 and p1_tracked
628 )
633 )
629
634
630 # this mean we are doing call for file we do not really care about the
635 # this mean we are doing call for file we do not really care about the
631 # data (eg: added or removed), however this should be a minor overhead
636 # data (eg: added or removed), however this should be a minor overhead
632 # compared to the overall update process calling this.
637 # compared to the overall update process calling this.
633 if need_parent_file_data:
638 if need_parent_file_data:
634 if parentfiledata is None:
639 if parentfiledata is None:
635 parentfiledata = self._get_filedata(filename)
640 parentfiledata = self._get_filedata(filename)
636 mtime = parentfiledata[2]
641 mtime = parentfiledata[2]
637
642
638 if mtime > self._lastnormaltime:
643 if mtime > self._lastnormaltime:
639 # Remember the most recent modification timeslot for
644 # Remember the most recent modification timeslot for
640 # status(), to make sure we won't miss future
645 # status(), to make sure we won't miss future
641 # size-preserving file content modifications that happen
646 # size-preserving file content modifications that happen
642 # within the same timeslot.
647 # within the same timeslot.
643 self._lastnormaltime = mtime
648 self._lastnormaltime = mtime
644
649
645 self._map.reset_state(
650 self._map.reset_state(
646 filename,
651 filename,
647 wc_tracked,
652 wc_tracked,
648 p1_tracked,
653 p1_tracked,
649 p2_tracked=p2_tracked,
654 p2_tracked=p2_tracked,
650 merged=merged,
655 merged=merged,
651 clean_p1=clean_p1,
656 clean_p1=clean_p1,
652 clean_p2=clean_p2,
657 clean_p2=clean_p2,
653 possibly_dirty=possibly_dirty,
658 possibly_dirty=possibly_dirty,
654 parentfiledata=parentfiledata,
659 parentfiledata=parentfiledata,
655 )
660 )
656 if (
661 if (
657 parentfiledata is not None
662 parentfiledata is not None
658 and parentfiledata[2] > self._lastnormaltime
663 and parentfiledata[2] > self._lastnormaltime
659 ):
664 ):
660 # Remember the most recent modification timeslot for status(),
665 # Remember the most recent modification timeslot for status(),
661 # to make sure we won't miss future size-preserving file content
666 # to make sure we won't miss future size-preserving file content
662 # modifications that happen within the same timeslot.
667 # modifications that happen within the same timeslot.
663 self._lastnormaltime = parentfiledata[2]
668 self._lastnormaltime = parentfiledata[2]
664
669
665 def _addpath(
670 def _addpath(
666 self,
671 self,
667 f,
672 f,
668 mode=0,
673 mode=0,
669 size=None,
674 size=None,
670 mtime=None,
675 mtime=None,
671 added=False,
676 added=False,
672 merged=False,
677 merged=False,
673 from_p2=False,
678 from_p2=False,
674 possibly_dirty=False,
679 possibly_dirty=False,
675 ):
680 ):
676 entry = self._map.get(f)
681 entry = self._map.get(f)
677 if added or entry is not None and not entry.tracked:
682 if added or entry is not None and not entry.tracked:
678 self._check_new_tracked_filename(f)
683 self._check_new_tracked_filename(f)
679 self._dirty = True
684 self._dirty = True
680 self._updatedfiles.add(f)
685 self._updatedfiles.add(f)
681 self._map.addfile(
686 self._map.addfile(
682 f,
687 f,
683 mode=mode,
688 mode=mode,
684 size=size,
689 size=size,
685 mtime=mtime,
690 mtime=mtime,
686 added=added,
691 added=added,
687 merged=merged,
692 merged=merged,
688 from_p2=from_p2,
693 from_p2=from_p2,
689 possibly_dirty=possibly_dirty,
694 possibly_dirty=possibly_dirty,
690 )
695 )
691
696
692 def _check_new_tracked_filename(self, filename):
697 def _check_new_tracked_filename(self, filename):
693 scmutil.checkfilename(filename)
698 scmutil.checkfilename(filename)
694 if self._map.hastrackeddir(filename):
699 if self._map.hastrackeddir(filename):
695 msg = _(b'directory %r already in dirstate')
700 msg = _(b'directory %r already in dirstate')
696 msg %= pycompat.bytestr(filename)
701 msg %= pycompat.bytestr(filename)
697 raise error.Abort(msg)
702 raise error.Abort(msg)
698 # shadows
703 # shadows
699 for d in pathutil.finddirs(filename):
704 for d in pathutil.finddirs(filename):
700 if self._map.hastrackeddir(d):
705 if self._map.hastrackeddir(d):
701 break
706 break
702 entry = self._map.get(d)
707 entry = self._map.get(d)
703 if entry is not None and not entry.removed:
708 if entry is not None and not entry.removed:
704 msg = _(b'file %r in dirstate clashes with %r')
709 msg = _(b'file %r in dirstate clashes with %r')
705 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
710 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
706 raise error.Abort(msg)
711 raise error.Abort(msg)
707
712
708 def _get_filedata(self, filename):
713 def _get_filedata(self, filename):
709 """returns"""
714 """returns"""
710 s = os.lstat(self._join(filename))
715 s = os.lstat(self._join(filename))
711 mode = s.st_mode
716 mode = s.st_mode
712 size = s.st_size
717 size = s.st_size
713 mtime = s[stat.ST_MTIME]
718 mtime = s[stat.ST_MTIME]
714 return (mode, size, mtime)
719 return (mode, size, mtime)
715
720
716 def _normallookup(self, f):
721 def _normallookup(self, f):
717 '''Mark a file normal, but possibly dirty.'''
722 '''Mark a file normal, but possibly dirty.'''
718 if self.in_merge:
723 if self.in_merge:
719 # if there is a merge going on and the file was either
724 # if there is a merge going on and the file was either
720 # "merged" or coming from other parent (-2) before
725 # "merged" or coming from other parent (-2) before
721 # being removed, restore that state.
726 # being removed, restore that state.
722 entry = self._map.get(f)
727 entry = self._map.get(f)
723 if entry is not None:
728 if entry is not None:
724 # XXX this should probably be dealt with a a lower level
729 # XXX this should probably be dealt with a a lower level
725 # (see `merged_removed` and `from_p2_removed`)
730 # (see `merged_removed` and `from_p2_removed`)
726 if entry.merged_removed or entry.from_p2_removed:
731 if entry.merged_removed or entry.from_p2_removed:
727 source = self._map.copymap.get(f)
732 source = self._map.copymap.get(f)
728 if entry.merged_removed:
733 if entry.merged_removed:
729 self._addpath(f, merged=True)
734 self._addpath(f, merged=True)
730 else:
735 else:
731 self._addpath(f, from_p2=True)
736 self._addpath(f, from_p2=True)
732 self._map.copymap.pop(f, None)
737 self._map.copymap.pop(f, None)
733 if source is not None:
738 if source is not None:
734 self.copy(source, f)
739 self.copy(source, f)
735 return
740 return
736 elif entry.merged or entry.from_p2:
741 elif entry.merged or entry.from_p2:
737 return
742 return
738 self._addpath(f, possibly_dirty=True)
743 self._addpath(f, possibly_dirty=True)
739 self._map.copymap.pop(f, None)
744 self._map.copymap.pop(f, None)
740
745
741 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
746 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
742 if exists is None:
747 if exists is None:
743 exists = os.path.lexists(os.path.join(self._root, path))
748 exists = os.path.lexists(os.path.join(self._root, path))
744 if not exists:
749 if not exists:
745 # Maybe a path component exists
750 # Maybe a path component exists
746 if not ignoremissing and b'/' in path:
751 if not ignoremissing and b'/' in path:
747 d, f = path.rsplit(b'/', 1)
752 d, f = path.rsplit(b'/', 1)
748 d = self._normalize(d, False, ignoremissing, None)
753 d = self._normalize(d, False, ignoremissing, None)
749 folded = d + b"/" + f
754 folded = d + b"/" + f
750 else:
755 else:
751 # No path components, preserve original case
756 # No path components, preserve original case
752 folded = path
757 folded = path
753 else:
758 else:
754 # recursively normalize leading directory components
759 # recursively normalize leading directory components
755 # against dirstate
760 # against dirstate
756 if b'/' in normed:
761 if b'/' in normed:
757 d, f = normed.rsplit(b'/', 1)
762 d, f = normed.rsplit(b'/', 1)
758 d = self._normalize(d, False, ignoremissing, True)
763 d = self._normalize(d, False, ignoremissing, True)
759 r = self._root + b"/" + d
764 r = self._root + b"/" + d
760 folded = d + b"/" + util.fspath(f, r)
765 folded = d + b"/" + util.fspath(f, r)
761 else:
766 else:
762 folded = util.fspath(normed, self._root)
767 folded = util.fspath(normed, self._root)
763 storemap[normed] = folded
768 storemap[normed] = folded
764
769
765 return folded
770 return folded
766
771
767 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
772 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
768 normed = util.normcase(path)
773 normed = util.normcase(path)
769 folded = self._map.filefoldmap.get(normed, None)
774 folded = self._map.filefoldmap.get(normed, None)
770 if folded is None:
775 if folded is None:
771 if isknown:
776 if isknown:
772 folded = path
777 folded = path
773 else:
778 else:
774 folded = self._discoverpath(
779 folded = self._discoverpath(
775 path, normed, ignoremissing, exists, self._map.filefoldmap
780 path, normed, ignoremissing, exists, self._map.filefoldmap
776 )
781 )
777 return folded
782 return folded
778
783
779 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
784 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
780 normed = util.normcase(path)
785 normed = util.normcase(path)
781 folded = self._map.filefoldmap.get(normed, None)
786 folded = self._map.filefoldmap.get(normed, None)
782 if folded is None:
787 if folded is None:
783 folded = self._map.dirfoldmap.get(normed, None)
788 folded = self._map.dirfoldmap.get(normed, None)
784 if folded is None:
789 if folded is None:
785 if isknown:
790 if isknown:
786 folded = path
791 folded = path
787 else:
792 else:
788 # store discovered result in dirfoldmap so that future
793 # store discovered result in dirfoldmap so that future
789 # normalizefile calls don't start matching directories
794 # normalizefile calls don't start matching directories
790 folded = self._discoverpath(
795 folded = self._discoverpath(
791 path, normed, ignoremissing, exists, self._map.dirfoldmap
796 path, normed, ignoremissing, exists, self._map.dirfoldmap
792 )
797 )
793 return folded
798 return folded
794
799
795 def normalize(self, path, isknown=False, ignoremissing=False):
800 def normalize(self, path, isknown=False, ignoremissing=False):
796 """
801 """
797 normalize the case of a pathname when on a casefolding filesystem
802 normalize the case of a pathname when on a casefolding filesystem
798
803
799 isknown specifies whether the filename came from walking the
804 isknown specifies whether the filename came from walking the
800 disk, to avoid extra filesystem access.
805 disk, to avoid extra filesystem access.
801
806
802 If ignoremissing is True, missing path are returned
807 If ignoremissing is True, missing path are returned
803 unchanged. Otherwise, we try harder to normalize possibly
808 unchanged. Otherwise, we try harder to normalize possibly
804 existing path components.
809 existing path components.
805
810
806 The normalized case is determined based on the following precedence:
811 The normalized case is determined based on the following precedence:
807
812
808 - version of name already stored in the dirstate
813 - version of name already stored in the dirstate
809 - version of name stored on disk
814 - version of name stored on disk
810 - version provided via command arguments
815 - version provided via command arguments
811 """
816 """
812
817
813 if self._checkcase:
818 if self._checkcase:
814 return self._normalize(path, isknown, ignoremissing)
819 return self._normalize(path, isknown, ignoremissing)
815 return path
820 return path
816
821
817 def clear(self):
822 def clear(self):
818 self._map.clear()
823 self._map.clear()
819 self._lastnormaltime = 0
824 self._lastnormaltime = 0
820 self._updatedfiles.clear()
825 self._updatedfiles.clear()
821 self._dirty = True
826 self._dirty = True
822
827
823 def rebuild(self, parent, allfiles, changedfiles=None):
828 def rebuild(self, parent, allfiles, changedfiles=None):
824 if changedfiles is None:
829 if changedfiles is None:
825 # Rebuild entire dirstate
830 # Rebuild entire dirstate
826 to_lookup = allfiles
831 to_lookup = allfiles
827 to_drop = []
832 to_drop = []
828 lastnormaltime = self._lastnormaltime
833 lastnormaltime = self._lastnormaltime
829 self.clear()
834 self.clear()
830 self._lastnormaltime = lastnormaltime
835 self._lastnormaltime = lastnormaltime
831 elif len(changedfiles) < 10:
836 elif len(changedfiles) < 10:
832 # Avoid turning allfiles into a set, which can be expensive if it's
837 # Avoid turning allfiles into a set, which can be expensive if it's
833 # large.
838 # large.
834 to_lookup = []
839 to_lookup = []
835 to_drop = []
840 to_drop = []
836 for f in changedfiles:
841 for f in changedfiles:
837 if f in allfiles:
842 if f in allfiles:
838 to_lookup.append(f)
843 to_lookup.append(f)
839 else:
844 else:
840 to_drop.append(f)
845 to_drop.append(f)
841 else:
846 else:
842 changedfilesset = set(changedfiles)
847 changedfilesset = set(changedfiles)
843 to_lookup = changedfilesset & set(allfiles)
848 to_lookup = changedfilesset & set(allfiles)
844 to_drop = changedfilesset - to_lookup
849 to_drop = changedfilesset - to_lookup
845
850
846 if self._origpl is None:
851 if self._origpl is None:
847 self._origpl = self._pl
852 self._origpl = self._pl
848 self._map.setparents(parent, self._nodeconstants.nullid)
853 self._map.setparents(parent, self._nodeconstants.nullid)
849
854
850 for f in to_lookup:
855 for f in to_lookup:
851 self._normallookup(f)
856 self._normallookup(f)
852 for f in to_drop:
857 for f in to_drop:
853 if self._map.dropfile(f):
858 if self._map.dropfile(f):
854 self._updatedfiles.add(f)
859 self._updatedfiles.add(f)
855
860
856 self._dirty = True
861 self._dirty = True
857
862
858 def identity(self):
863 def identity(self):
859 """Return identity of dirstate itself to detect changing in storage
864 """Return identity of dirstate itself to detect changing in storage
860
865
861 If identity of previous dirstate is equal to this, writing
866 If identity of previous dirstate is equal to this, writing
862 changes based on the former dirstate out can keep consistency.
867 changes based on the former dirstate out can keep consistency.
863 """
868 """
864 return self._map.identity
869 return self._map.identity
865
870
866 def write(self, tr):
871 def write(self, tr):
867 if not self._dirty:
872 if not self._dirty:
868 return
873 return
869
874
870 filename = self._filename
875 filename = self._filename
871 if tr:
876 if tr:
872 # 'dirstate.write()' is not only for writing in-memory
877 # 'dirstate.write()' is not only for writing in-memory
873 # changes out, but also for dropping ambiguous timestamp.
878 # changes out, but also for dropping ambiguous timestamp.
874 # delayed writing re-raise "ambiguous timestamp issue".
879 # delayed writing re-raise "ambiguous timestamp issue".
875 # See also the wiki page below for detail:
880 # See also the wiki page below for detail:
876 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
881 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
877
882
878 # emulate dropping timestamp in 'parsers.pack_dirstate'
883 # emulate dropping timestamp in 'parsers.pack_dirstate'
879 now = _getfsnow(self._opener)
884 now = _getfsnow(self._opener)
880 self._map.clearambiguoustimes(self._updatedfiles, now)
885 self._map.clearambiguoustimes(self._updatedfiles, now)
881
886
882 # emulate that all 'dirstate.normal' results are written out
887 # emulate that all 'dirstate.normal' results are written out
883 self._lastnormaltime = 0
888 self._lastnormaltime = 0
884 self._updatedfiles.clear()
889 self._updatedfiles.clear()
885
890
886 # delay writing in-memory changes out
891 # delay writing in-memory changes out
887 tr.addfilegenerator(
892 tr.addfilegenerator(
888 b'dirstate',
893 b'dirstate',
889 (self._filename,),
894 (self._filename,),
890 lambda f: self._writedirstate(tr, f),
895 lambda f: self._writedirstate(tr, f),
891 location=b'plain',
896 location=b'plain',
892 )
897 )
893 return
898 return
894
899
895 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
900 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
896 self._writedirstate(tr, st)
901 self._writedirstate(tr, st)
897
902
898 def addparentchangecallback(self, category, callback):
903 def addparentchangecallback(self, category, callback):
899 """add a callback to be called when the wd parents are changed
904 """add a callback to be called when the wd parents are changed
900
905
901 Callback will be called with the following arguments:
906 Callback will be called with the following arguments:
902 dirstate, (oldp1, oldp2), (newp1, newp2)
907 dirstate, (oldp1, oldp2), (newp1, newp2)
903
908
904 Category is a unique identifier to allow overwriting an old callback
909 Category is a unique identifier to allow overwriting an old callback
905 with a newer callback.
910 with a newer callback.
906 """
911 """
907 self._plchangecallbacks[category] = callback
912 self._plchangecallbacks[category] = callback
908
913
909 def _writedirstate(self, tr, st):
914 def _writedirstate(self, tr, st):
910 # notify callbacks about parents change
915 # notify callbacks about parents change
911 if self._origpl is not None and self._origpl != self._pl:
916 if self._origpl is not None and self._origpl != self._pl:
912 for c, callback in sorted(
917 for c, callback in sorted(
913 pycompat.iteritems(self._plchangecallbacks)
918 pycompat.iteritems(self._plchangecallbacks)
914 ):
919 ):
915 callback(self, self._origpl, self._pl)
920 callback(self, self._origpl, self._pl)
916 self._origpl = None
921 self._origpl = None
917 # use the modification time of the newly created temporary file as the
922 # use the modification time of the newly created temporary file as the
918 # filesystem's notion of 'now'
923 # filesystem's notion of 'now'
919 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
924 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
920
925
921 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
926 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
922 # timestamp of each entries in dirstate, because of 'now > mtime'
927 # timestamp of each entries in dirstate, because of 'now > mtime'
923 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
928 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
924 if delaywrite > 0:
929 if delaywrite > 0:
925 # do we have any files to delay for?
930 # do we have any files to delay for?
926 for f, e in pycompat.iteritems(self._map):
931 for f, e in pycompat.iteritems(self._map):
927 if e.need_delay(now):
932 if e.need_delay(now):
928 import time # to avoid useless import
933 import time # to avoid useless import
929
934
930 # rather than sleep n seconds, sleep until the next
935 # rather than sleep n seconds, sleep until the next
931 # multiple of n seconds
936 # multiple of n seconds
932 clock = time.time()
937 clock = time.time()
933 start = int(clock) - (int(clock) % delaywrite)
938 start = int(clock) - (int(clock) % delaywrite)
934 end = start + delaywrite
939 end = start + delaywrite
935 time.sleep(end - clock)
940 time.sleep(end - clock)
936 now = end # trust our estimate that the end is near now
941 now = end # trust our estimate that the end is near now
937 break
942 break
938
943
939 self._map.write(tr, st, now)
944 self._map.write(tr, st, now)
940 self._lastnormaltime = 0
945 self._lastnormaltime = 0
941 self._dirty = False
946 self._dirty = False
942
947
943 def _dirignore(self, f):
948 def _dirignore(self, f):
944 if self._ignore(f):
949 if self._ignore(f):
945 return True
950 return True
946 for p in pathutil.finddirs(f):
951 for p in pathutil.finddirs(f):
947 if self._ignore(p):
952 if self._ignore(p):
948 return True
953 return True
949 return False
954 return False
950
955
951 def _ignorefiles(self):
956 def _ignorefiles(self):
952 files = []
957 files = []
953 if os.path.exists(self._join(b'.hgignore')):
958 if os.path.exists(self._join(b'.hgignore')):
954 files.append(self._join(b'.hgignore'))
959 files.append(self._join(b'.hgignore'))
955 for name, path in self._ui.configitems(b"ui"):
960 for name, path in self._ui.configitems(b"ui"):
956 if name == b'ignore' or name.startswith(b'ignore.'):
961 if name == b'ignore' or name.startswith(b'ignore.'):
957 # we need to use os.path.join here rather than self._join
962 # we need to use os.path.join here rather than self._join
958 # because path is arbitrary and user-specified
963 # because path is arbitrary and user-specified
959 files.append(os.path.join(self._rootdir, util.expandpath(path)))
964 files.append(os.path.join(self._rootdir, util.expandpath(path)))
960 return files
965 return files
961
966
962 def _ignorefileandline(self, f):
967 def _ignorefileandline(self, f):
963 files = collections.deque(self._ignorefiles())
968 files = collections.deque(self._ignorefiles())
964 visited = set()
969 visited = set()
965 while files:
970 while files:
966 i = files.popleft()
971 i = files.popleft()
967 patterns = matchmod.readpatternfile(
972 patterns = matchmod.readpatternfile(
968 i, self._ui.warn, sourceinfo=True
973 i, self._ui.warn, sourceinfo=True
969 )
974 )
970 for pattern, lineno, line in patterns:
975 for pattern, lineno, line in patterns:
971 kind, p = matchmod._patsplit(pattern, b'glob')
976 kind, p = matchmod._patsplit(pattern, b'glob')
972 if kind == b"subinclude":
977 if kind == b"subinclude":
973 if p not in visited:
978 if p not in visited:
974 files.append(p)
979 files.append(p)
975 continue
980 continue
976 m = matchmod.match(
981 m = matchmod.match(
977 self._root, b'', [], [pattern], warn=self._ui.warn
982 self._root, b'', [], [pattern], warn=self._ui.warn
978 )
983 )
979 if m(f):
984 if m(f):
980 return (i, lineno, line)
985 return (i, lineno, line)
981 visited.add(i)
986 visited.add(i)
982 return (None, -1, b"")
987 return (None, -1, b"")
983
988
984 def _walkexplicit(self, match, subrepos):
989 def _walkexplicit(self, match, subrepos):
985 """Get stat data about the files explicitly specified by match.
990 """Get stat data about the files explicitly specified by match.
986
991
987 Return a triple (results, dirsfound, dirsnotfound).
992 Return a triple (results, dirsfound, dirsnotfound).
988 - results is a mapping from filename to stat result. It also contains
993 - results is a mapping from filename to stat result. It also contains
989 listings mapping subrepos and .hg to None.
994 listings mapping subrepos and .hg to None.
990 - dirsfound is a list of files found to be directories.
995 - dirsfound is a list of files found to be directories.
991 - dirsnotfound is a list of files that the dirstate thinks are
996 - dirsnotfound is a list of files that the dirstate thinks are
992 directories and that were not found."""
997 directories and that were not found."""
993
998
994 def badtype(mode):
999 def badtype(mode):
995 kind = _(b'unknown')
1000 kind = _(b'unknown')
996 if stat.S_ISCHR(mode):
1001 if stat.S_ISCHR(mode):
997 kind = _(b'character device')
1002 kind = _(b'character device')
998 elif stat.S_ISBLK(mode):
1003 elif stat.S_ISBLK(mode):
999 kind = _(b'block device')
1004 kind = _(b'block device')
1000 elif stat.S_ISFIFO(mode):
1005 elif stat.S_ISFIFO(mode):
1001 kind = _(b'fifo')
1006 kind = _(b'fifo')
1002 elif stat.S_ISSOCK(mode):
1007 elif stat.S_ISSOCK(mode):
1003 kind = _(b'socket')
1008 kind = _(b'socket')
1004 elif stat.S_ISDIR(mode):
1009 elif stat.S_ISDIR(mode):
1005 kind = _(b'directory')
1010 kind = _(b'directory')
1006 return _(b'unsupported file type (type is %s)') % kind
1011 return _(b'unsupported file type (type is %s)') % kind
1007
1012
1008 badfn = match.bad
1013 badfn = match.bad
1009 dmap = self._map
1014 dmap = self._map
1010 lstat = os.lstat
1015 lstat = os.lstat
1011 getkind = stat.S_IFMT
1016 getkind = stat.S_IFMT
1012 dirkind = stat.S_IFDIR
1017 dirkind = stat.S_IFDIR
1013 regkind = stat.S_IFREG
1018 regkind = stat.S_IFREG
1014 lnkkind = stat.S_IFLNK
1019 lnkkind = stat.S_IFLNK
1015 join = self._join
1020 join = self._join
1016 dirsfound = []
1021 dirsfound = []
1017 foundadd = dirsfound.append
1022 foundadd = dirsfound.append
1018 dirsnotfound = []
1023 dirsnotfound = []
1019 notfoundadd = dirsnotfound.append
1024 notfoundadd = dirsnotfound.append
1020
1025
1021 if not match.isexact() and self._checkcase:
1026 if not match.isexact() and self._checkcase:
1022 normalize = self._normalize
1027 normalize = self._normalize
1023 else:
1028 else:
1024 normalize = None
1029 normalize = None
1025
1030
1026 files = sorted(match.files())
1031 files = sorted(match.files())
1027 subrepos.sort()
1032 subrepos.sort()
1028 i, j = 0, 0
1033 i, j = 0, 0
1029 while i < len(files) and j < len(subrepos):
1034 while i < len(files) and j < len(subrepos):
1030 subpath = subrepos[j] + b"/"
1035 subpath = subrepos[j] + b"/"
1031 if files[i] < subpath:
1036 if files[i] < subpath:
1032 i += 1
1037 i += 1
1033 continue
1038 continue
1034 while i < len(files) and files[i].startswith(subpath):
1039 while i < len(files) and files[i].startswith(subpath):
1035 del files[i]
1040 del files[i]
1036 j += 1
1041 j += 1
1037
1042
1038 if not files or b'' in files:
1043 if not files or b'' in files:
1039 files = [b'']
1044 files = [b'']
1040 # constructing the foldmap is expensive, so don't do it for the
1045 # constructing the foldmap is expensive, so don't do it for the
1041 # common case where files is ['']
1046 # common case where files is ['']
1042 normalize = None
1047 normalize = None
1043 results = dict.fromkeys(subrepos)
1048 results = dict.fromkeys(subrepos)
1044 results[b'.hg'] = None
1049 results[b'.hg'] = None
1045
1050
1046 for ff in files:
1051 for ff in files:
1047 if normalize:
1052 if normalize:
1048 nf = normalize(ff, False, True)
1053 nf = normalize(ff, False, True)
1049 else:
1054 else:
1050 nf = ff
1055 nf = ff
1051 if nf in results:
1056 if nf in results:
1052 continue
1057 continue
1053
1058
1054 try:
1059 try:
1055 st = lstat(join(nf))
1060 st = lstat(join(nf))
1056 kind = getkind(st.st_mode)
1061 kind = getkind(st.st_mode)
1057 if kind == dirkind:
1062 if kind == dirkind:
1058 if nf in dmap:
1063 if nf in dmap:
1059 # file replaced by dir on disk but still in dirstate
1064 # file replaced by dir on disk but still in dirstate
1060 results[nf] = None
1065 results[nf] = None
1061 foundadd((nf, ff))
1066 foundadd((nf, ff))
1062 elif kind == regkind or kind == lnkkind:
1067 elif kind == regkind or kind == lnkkind:
1063 results[nf] = st
1068 results[nf] = st
1064 else:
1069 else:
1065 badfn(ff, badtype(kind))
1070 badfn(ff, badtype(kind))
1066 if nf in dmap:
1071 if nf in dmap:
1067 results[nf] = None
1072 results[nf] = None
1068 except OSError as inst: # nf not found on disk - it is dirstate only
1073 except OSError as inst: # nf not found on disk - it is dirstate only
1069 if nf in dmap: # does it exactly match a missing file?
1074 if nf in dmap: # does it exactly match a missing file?
1070 results[nf] = None
1075 results[nf] = None
1071 else: # does it match a missing directory?
1076 else: # does it match a missing directory?
1072 if self._map.hasdir(nf):
1077 if self._map.hasdir(nf):
1073 notfoundadd(nf)
1078 notfoundadd(nf)
1074 else:
1079 else:
1075 badfn(ff, encoding.strtolocal(inst.strerror))
1080 badfn(ff, encoding.strtolocal(inst.strerror))
1076
1081
1077 # match.files() may contain explicitly-specified paths that shouldn't
1082 # match.files() may contain explicitly-specified paths that shouldn't
1078 # be taken; drop them from the list of files found. dirsfound/notfound
1083 # be taken; drop them from the list of files found. dirsfound/notfound
1079 # aren't filtered here because they will be tested later.
1084 # aren't filtered here because they will be tested later.
1080 if match.anypats():
1085 if match.anypats():
1081 for f in list(results):
1086 for f in list(results):
1082 if f == b'.hg' or f in subrepos:
1087 if f == b'.hg' or f in subrepos:
1083 # keep sentinel to disable further out-of-repo walks
1088 # keep sentinel to disable further out-of-repo walks
1084 continue
1089 continue
1085 if not match(f):
1090 if not match(f):
1086 del results[f]
1091 del results[f]
1087
1092
1088 # Case insensitive filesystems cannot rely on lstat() failing to detect
1093 # Case insensitive filesystems cannot rely on lstat() failing to detect
1089 # a case-only rename. Prune the stat object for any file that does not
1094 # a case-only rename. Prune the stat object for any file that does not
1090 # match the case in the filesystem, if there are multiple files that
1095 # match the case in the filesystem, if there are multiple files that
1091 # normalize to the same path.
1096 # normalize to the same path.
1092 if match.isexact() and self._checkcase:
1097 if match.isexact() and self._checkcase:
1093 normed = {}
1098 normed = {}
1094
1099
1095 for f, st in pycompat.iteritems(results):
1100 for f, st in pycompat.iteritems(results):
1096 if st is None:
1101 if st is None:
1097 continue
1102 continue
1098
1103
1099 nc = util.normcase(f)
1104 nc = util.normcase(f)
1100 paths = normed.get(nc)
1105 paths = normed.get(nc)
1101
1106
1102 if paths is None:
1107 if paths is None:
1103 paths = set()
1108 paths = set()
1104 normed[nc] = paths
1109 normed[nc] = paths
1105
1110
1106 paths.add(f)
1111 paths.add(f)
1107
1112
1108 for norm, paths in pycompat.iteritems(normed):
1113 for norm, paths in pycompat.iteritems(normed):
1109 if len(paths) > 1:
1114 if len(paths) > 1:
1110 for path in paths:
1115 for path in paths:
1111 folded = self._discoverpath(
1116 folded = self._discoverpath(
1112 path, norm, True, None, self._map.dirfoldmap
1117 path, norm, True, None, self._map.dirfoldmap
1113 )
1118 )
1114 if path != folded:
1119 if path != folded:
1115 results[path] = None
1120 results[path] = None
1116
1121
1117 return results, dirsfound, dirsnotfound
1122 return results, dirsfound, dirsnotfound
1118
1123
1119 def walk(self, match, subrepos, unknown, ignored, full=True):
1124 def walk(self, match, subrepos, unknown, ignored, full=True):
1120 """
1125 """
1121 Walk recursively through the directory tree, finding all files
1126 Walk recursively through the directory tree, finding all files
1122 matched by match.
1127 matched by match.
1123
1128
1124 If full is False, maybe skip some known-clean files.
1129 If full is False, maybe skip some known-clean files.
1125
1130
1126 Return a dict mapping filename to stat-like object (either
1131 Return a dict mapping filename to stat-like object (either
1127 mercurial.osutil.stat instance or return value of os.stat()).
1132 mercurial.osutil.stat instance or return value of os.stat()).
1128
1133
1129 """
1134 """
1130 # full is a flag that extensions that hook into walk can use -- this
1135 # full is a flag that extensions that hook into walk can use -- this
1131 # implementation doesn't use it at all. This satisfies the contract
1136 # implementation doesn't use it at all. This satisfies the contract
1132 # because we only guarantee a "maybe".
1137 # because we only guarantee a "maybe".
1133
1138
1134 if ignored:
1139 if ignored:
1135 ignore = util.never
1140 ignore = util.never
1136 dirignore = util.never
1141 dirignore = util.never
1137 elif unknown:
1142 elif unknown:
1138 ignore = self._ignore
1143 ignore = self._ignore
1139 dirignore = self._dirignore
1144 dirignore = self._dirignore
1140 else:
1145 else:
1141 # if not unknown and not ignored, drop dir recursion and step 2
1146 # if not unknown and not ignored, drop dir recursion and step 2
1142 ignore = util.always
1147 ignore = util.always
1143 dirignore = util.always
1148 dirignore = util.always
1144
1149
1145 matchfn = match.matchfn
1150 matchfn = match.matchfn
1146 matchalways = match.always()
1151 matchalways = match.always()
1147 matchtdir = match.traversedir
1152 matchtdir = match.traversedir
1148 dmap = self._map
1153 dmap = self._map
1149 listdir = util.listdir
1154 listdir = util.listdir
1150 lstat = os.lstat
1155 lstat = os.lstat
1151 dirkind = stat.S_IFDIR
1156 dirkind = stat.S_IFDIR
1152 regkind = stat.S_IFREG
1157 regkind = stat.S_IFREG
1153 lnkkind = stat.S_IFLNK
1158 lnkkind = stat.S_IFLNK
1154 join = self._join
1159 join = self._join
1155
1160
1156 exact = skipstep3 = False
1161 exact = skipstep3 = False
1157 if match.isexact(): # match.exact
1162 if match.isexact(): # match.exact
1158 exact = True
1163 exact = True
1159 dirignore = util.always # skip step 2
1164 dirignore = util.always # skip step 2
1160 elif match.prefix(): # match.match, no patterns
1165 elif match.prefix(): # match.match, no patterns
1161 skipstep3 = True
1166 skipstep3 = True
1162
1167
1163 if not exact and self._checkcase:
1168 if not exact and self._checkcase:
1164 normalize = self._normalize
1169 normalize = self._normalize
1165 normalizefile = self._normalizefile
1170 normalizefile = self._normalizefile
1166 skipstep3 = False
1171 skipstep3 = False
1167 else:
1172 else:
1168 normalize = self._normalize
1173 normalize = self._normalize
1169 normalizefile = None
1174 normalizefile = None
1170
1175
1171 # step 1: find all explicit files
1176 # step 1: find all explicit files
1172 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1177 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1173 if matchtdir:
1178 if matchtdir:
1174 for d in work:
1179 for d in work:
1175 matchtdir(d[0])
1180 matchtdir(d[0])
1176 for d in dirsnotfound:
1181 for d in dirsnotfound:
1177 matchtdir(d)
1182 matchtdir(d)
1178
1183
1179 skipstep3 = skipstep3 and not (work or dirsnotfound)
1184 skipstep3 = skipstep3 and not (work or dirsnotfound)
1180 work = [d for d in work if not dirignore(d[0])]
1185 work = [d for d in work if not dirignore(d[0])]
1181
1186
1182 # step 2: visit subdirectories
1187 # step 2: visit subdirectories
1183 def traverse(work, alreadynormed):
1188 def traverse(work, alreadynormed):
1184 wadd = work.append
1189 wadd = work.append
1185 while work:
1190 while work:
1186 tracing.counter('dirstate.walk work', len(work))
1191 tracing.counter('dirstate.walk work', len(work))
1187 nd = work.pop()
1192 nd = work.pop()
1188 visitentries = match.visitchildrenset(nd)
1193 visitentries = match.visitchildrenset(nd)
1189 if not visitentries:
1194 if not visitentries:
1190 continue
1195 continue
1191 if visitentries == b'this' or visitentries == b'all':
1196 if visitentries == b'this' or visitentries == b'all':
1192 visitentries = None
1197 visitentries = None
1193 skip = None
1198 skip = None
1194 if nd != b'':
1199 if nd != b'':
1195 skip = b'.hg'
1200 skip = b'.hg'
1196 try:
1201 try:
1197 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1202 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1198 entries = listdir(join(nd), stat=True, skip=skip)
1203 entries = listdir(join(nd), stat=True, skip=skip)
1199 except OSError as inst:
1204 except OSError as inst:
1200 if inst.errno in (errno.EACCES, errno.ENOENT):
1205 if inst.errno in (errno.EACCES, errno.ENOENT):
1201 match.bad(
1206 match.bad(
1202 self.pathto(nd), encoding.strtolocal(inst.strerror)
1207 self.pathto(nd), encoding.strtolocal(inst.strerror)
1203 )
1208 )
1204 continue
1209 continue
1205 raise
1210 raise
1206 for f, kind, st in entries:
1211 for f, kind, st in entries:
1207 # Some matchers may return files in the visitentries set,
1212 # Some matchers may return files in the visitentries set,
1208 # instead of 'this', if the matcher explicitly mentions them
1213 # instead of 'this', if the matcher explicitly mentions them
1209 # and is not an exactmatcher. This is acceptable; we do not
1214 # and is not an exactmatcher. This is acceptable; we do not
1210 # make any hard assumptions about file-or-directory below
1215 # make any hard assumptions about file-or-directory below
1211 # based on the presence of `f` in visitentries. If
1216 # based on the presence of `f` in visitentries. If
1212 # visitchildrenset returned a set, we can always skip the
1217 # visitchildrenset returned a set, we can always skip the
1213 # entries *not* in the set it provided regardless of whether
1218 # entries *not* in the set it provided regardless of whether
1214 # they're actually a file or a directory.
1219 # they're actually a file or a directory.
1215 if visitentries and f not in visitentries:
1220 if visitentries and f not in visitentries:
1216 continue
1221 continue
1217 if normalizefile:
1222 if normalizefile:
1218 # even though f might be a directory, we're only
1223 # even though f might be a directory, we're only
1219 # interested in comparing it to files currently in the
1224 # interested in comparing it to files currently in the
1220 # dmap -- therefore normalizefile is enough
1225 # dmap -- therefore normalizefile is enough
1221 nf = normalizefile(
1226 nf = normalizefile(
1222 nd and (nd + b"/" + f) or f, True, True
1227 nd and (nd + b"/" + f) or f, True, True
1223 )
1228 )
1224 else:
1229 else:
1225 nf = nd and (nd + b"/" + f) or f
1230 nf = nd and (nd + b"/" + f) or f
1226 if nf not in results:
1231 if nf not in results:
1227 if kind == dirkind:
1232 if kind == dirkind:
1228 if not ignore(nf):
1233 if not ignore(nf):
1229 if matchtdir:
1234 if matchtdir:
1230 matchtdir(nf)
1235 matchtdir(nf)
1231 wadd(nf)
1236 wadd(nf)
1232 if nf in dmap and (matchalways or matchfn(nf)):
1237 if nf in dmap and (matchalways or matchfn(nf)):
1233 results[nf] = None
1238 results[nf] = None
1234 elif kind == regkind or kind == lnkkind:
1239 elif kind == regkind or kind == lnkkind:
1235 if nf in dmap:
1240 if nf in dmap:
1236 if matchalways or matchfn(nf):
1241 if matchalways or matchfn(nf):
1237 results[nf] = st
1242 results[nf] = st
1238 elif (matchalways or matchfn(nf)) and not ignore(
1243 elif (matchalways or matchfn(nf)) and not ignore(
1239 nf
1244 nf
1240 ):
1245 ):
1241 # unknown file -- normalize if necessary
1246 # unknown file -- normalize if necessary
1242 if not alreadynormed:
1247 if not alreadynormed:
1243 nf = normalize(nf, False, True)
1248 nf = normalize(nf, False, True)
1244 results[nf] = st
1249 results[nf] = st
1245 elif nf in dmap and (matchalways or matchfn(nf)):
1250 elif nf in dmap and (matchalways or matchfn(nf)):
1246 results[nf] = None
1251 results[nf] = None
1247
1252
1248 for nd, d in work:
1253 for nd, d in work:
1249 # alreadynormed means that processwork doesn't have to do any
1254 # alreadynormed means that processwork doesn't have to do any
1250 # expensive directory normalization
1255 # expensive directory normalization
1251 alreadynormed = not normalize or nd == d
1256 alreadynormed = not normalize or nd == d
1252 traverse([d], alreadynormed)
1257 traverse([d], alreadynormed)
1253
1258
1254 for s in subrepos:
1259 for s in subrepos:
1255 del results[s]
1260 del results[s]
1256 del results[b'.hg']
1261 del results[b'.hg']
1257
1262
1258 # step 3: visit remaining files from dmap
1263 # step 3: visit remaining files from dmap
1259 if not skipstep3 and not exact:
1264 if not skipstep3 and not exact:
1260 # If a dmap file is not in results yet, it was either
1265 # If a dmap file is not in results yet, it was either
1261 # a) not matching matchfn b) ignored, c) missing, or d) under a
1266 # a) not matching matchfn b) ignored, c) missing, or d) under a
1262 # symlink directory.
1267 # symlink directory.
1263 if not results and matchalways:
1268 if not results and matchalways:
1264 visit = [f for f in dmap]
1269 visit = [f for f in dmap]
1265 else:
1270 else:
1266 visit = [f for f in dmap if f not in results and matchfn(f)]
1271 visit = [f for f in dmap if f not in results and matchfn(f)]
1267 visit.sort()
1272 visit.sort()
1268
1273
1269 if unknown:
1274 if unknown:
1270 # unknown == True means we walked all dirs under the roots
1275 # unknown == True means we walked all dirs under the roots
1271 # that wasn't ignored, and everything that matched was stat'ed
1276 # that wasn't ignored, and everything that matched was stat'ed
1272 # and is already in results.
1277 # and is already in results.
1273 # The rest must thus be ignored or under a symlink.
1278 # The rest must thus be ignored or under a symlink.
1274 audit_path = pathutil.pathauditor(self._root, cached=True)
1279 audit_path = pathutil.pathauditor(self._root, cached=True)
1275
1280
1276 for nf in iter(visit):
1281 for nf in iter(visit):
1277 # If a stat for the same file was already added with a
1282 # If a stat for the same file was already added with a
1278 # different case, don't add one for this, since that would
1283 # different case, don't add one for this, since that would
1279 # make it appear as if the file exists under both names
1284 # make it appear as if the file exists under both names
1280 # on disk.
1285 # on disk.
1281 if (
1286 if (
1282 normalizefile
1287 normalizefile
1283 and normalizefile(nf, True, True) in results
1288 and normalizefile(nf, True, True) in results
1284 ):
1289 ):
1285 results[nf] = None
1290 results[nf] = None
1286 # Report ignored items in the dmap as long as they are not
1291 # Report ignored items in the dmap as long as they are not
1287 # under a symlink directory.
1292 # under a symlink directory.
1288 elif audit_path.check(nf):
1293 elif audit_path.check(nf):
1289 try:
1294 try:
1290 results[nf] = lstat(join(nf))
1295 results[nf] = lstat(join(nf))
1291 # file was just ignored, no links, and exists
1296 # file was just ignored, no links, and exists
1292 except OSError:
1297 except OSError:
1293 # file doesn't exist
1298 # file doesn't exist
1294 results[nf] = None
1299 results[nf] = None
1295 else:
1300 else:
1296 # It's either missing or under a symlink directory
1301 # It's either missing or under a symlink directory
1297 # which we in this case report as missing
1302 # which we in this case report as missing
1298 results[nf] = None
1303 results[nf] = None
1299 else:
1304 else:
1300 # We may not have walked the full directory tree above,
1305 # We may not have walked the full directory tree above,
1301 # so stat and check everything we missed.
1306 # so stat and check everything we missed.
1302 iv = iter(visit)
1307 iv = iter(visit)
1303 for st in util.statfiles([join(i) for i in visit]):
1308 for st in util.statfiles([join(i) for i in visit]):
1304 results[next(iv)] = st
1309 results[next(iv)] = st
1305 return results
1310 return results
1306
1311
1307 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1312 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1308 # Force Rayon (Rust parallelism library) to respect the number of
1313 # Force Rayon (Rust parallelism library) to respect the number of
1309 # workers. This is a temporary workaround until Rust code knows
1314 # workers. This is a temporary workaround until Rust code knows
1310 # how to read the config file.
1315 # how to read the config file.
1311 numcpus = self._ui.configint(b"worker", b"numcpus")
1316 numcpus = self._ui.configint(b"worker", b"numcpus")
1312 if numcpus is not None:
1317 if numcpus is not None:
1313 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1318 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1314
1319
1315 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1320 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1316 if not workers_enabled:
1321 if not workers_enabled:
1317 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1322 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1318
1323
1319 (
1324 (
1320 lookup,
1325 lookup,
1321 modified,
1326 modified,
1322 added,
1327 added,
1323 removed,
1328 removed,
1324 deleted,
1329 deleted,
1325 clean,
1330 clean,
1326 ignored,
1331 ignored,
1327 unknown,
1332 unknown,
1328 warnings,
1333 warnings,
1329 bad,
1334 bad,
1330 traversed,
1335 traversed,
1331 dirty,
1336 dirty,
1332 ) = rustmod.status(
1337 ) = rustmod.status(
1333 self._map._rustmap,
1338 self._map._rustmap,
1334 matcher,
1339 matcher,
1335 self._rootdir,
1340 self._rootdir,
1336 self._ignorefiles(),
1341 self._ignorefiles(),
1337 self._checkexec,
1342 self._checkexec,
1338 self._lastnormaltime,
1343 self._lastnormaltime,
1339 bool(list_clean),
1344 bool(list_clean),
1340 bool(list_ignored),
1345 bool(list_ignored),
1341 bool(list_unknown),
1346 bool(list_unknown),
1342 bool(matcher.traversedir),
1347 bool(matcher.traversedir),
1343 )
1348 )
1344
1349
1345 self._dirty |= dirty
1350 self._dirty |= dirty
1346
1351
1347 if matcher.traversedir:
1352 if matcher.traversedir:
1348 for dir in traversed:
1353 for dir in traversed:
1349 matcher.traversedir(dir)
1354 matcher.traversedir(dir)
1350
1355
1351 if self._ui.warn:
1356 if self._ui.warn:
1352 for item in warnings:
1357 for item in warnings:
1353 if isinstance(item, tuple):
1358 if isinstance(item, tuple):
1354 file_path, syntax = item
1359 file_path, syntax = item
1355 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1360 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1356 file_path,
1361 file_path,
1357 syntax,
1362 syntax,
1358 )
1363 )
1359 self._ui.warn(msg)
1364 self._ui.warn(msg)
1360 else:
1365 else:
1361 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1366 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1362 self._ui.warn(
1367 self._ui.warn(
1363 msg
1368 msg
1364 % (
1369 % (
1365 pathutil.canonpath(
1370 pathutil.canonpath(
1366 self._rootdir, self._rootdir, item
1371 self._rootdir, self._rootdir, item
1367 ),
1372 ),
1368 b"No such file or directory",
1373 b"No such file or directory",
1369 )
1374 )
1370 )
1375 )
1371
1376
1372 for (fn, message) in bad:
1377 for (fn, message) in bad:
1373 matcher.bad(fn, encoding.strtolocal(message))
1378 matcher.bad(fn, encoding.strtolocal(message))
1374
1379
1375 status = scmutil.status(
1380 status = scmutil.status(
1376 modified=modified,
1381 modified=modified,
1377 added=added,
1382 added=added,
1378 removed=removed,
1383 removed=removed,
1379 deleted=deleted,
1384 deleted=deleted,
1380 unknown=unknown,
1385 unknown=unknown,
1381 ignored=ignored,
1386 ignored=ignored,
1382 clean=clean,
1387 clean=clean,
1383 )
1388 )
1384 return (lookup, status)
1389 return (lookup, status)
1385
1390
1386 def status(self, match, subrepos, ignored, clean, unknown):
1391 def status(self, match, subrepos, ignored, clean, unknown):
1387 """Determine the status of the working copy relative to the
1392 """Determine the status of the working copy relative to the
1388 dirstate and return a pair of (unsure, status), where status is of type
1393 dirstate and return a pair of (unsure, status), where status is of type
1389 scmutil.status and:
1394 scmutil.status and:
1390
1395
1391 unsure:
1396 unsure:
1392 files that might have been modified since the dirstate was
1397 files that might have been modified since the dirstate was
1393 written, but need to be read to be sure (size is the same
1398 written, but need to be read to be sure (size is the same
1394 but mtime differs)
1399 but mtime differs)
1395 status.modified:
1400 status.modified:
1396 files that have definitely been modified since the dirstate
1401 files that have definitely been modified since the dirstate
1397 was written (different size or mode)
1402 was written (different size or mode)
1398 status.clean:
1403 status.clean:
1399 files that have definitely not been modified since the
1404 files that have definitely not been modified since the
1400 dirstate was written
1405 dirstate was written
1401 """
1406 """
1402 listignored, listclean, listunknown = ignored, clean, unknown
1407 listignored, listclean, listunknown = ignored, clean, unknown
1403 lookup, modified, added, unknown, ignored = [], [], [], [], []
1408 lookup, modified, added, unknown, ignored = [], [], [], [], []
1404 removed, deleted, clean = [], [], []
1409 removed, deleted, clean = [], [], []
1405
1410
1406 dmap = self._map
1411 dmap = self._map
1407 dmap.preload()
1412 dmap.preload()
1408
1413
1409 use_rust = True
1414 use_rust = True
1410
1415
1411 allowed_matchers = (
1416 allowed_matchers = (
1412 matchmod.alwaysmatcher,
1417 matchmod.alwaysmatcher,
1413 matchmod.exactmatcher,
1418 matchmod.exactmatcher,
1414 matchmod.includematcher,
1419 matchmod.includematcher,
1415 )
1420 )
1416
1421
1417 if rustmod is None:
1422 if rustmod is None:
1418 use_rust = False
1423 use_rust = False
1419 elif self._checkcase:
1424 elif self._checkcase:
1420 # Case-insensitive filesystems are not handled yet
1425 # Case-insensitive filesystems are not handled yet
1421 use_rust = False
1426 use_rust = False
1422 elif subrepos:
1427 elif subrepos:
1423 use_rust = False
1428 use_rust = False
1424 elif sparse.enabled:
1429 elif sparse.enabled:
1425 use_rust = False
1430 use_rust = False
1426 elif not isinstance(match, allowed_matchers):
1431 elif not isinstance(match, allowed_matchers):
1427 # Some matchers have yet to be implemented
1432 # Some matchers have yet to be implemented
1428 use_rust = False
1433 use_rust = False
1429
1434
1430 if use_rust:
1435 if use_rust:
1431 try:
1436 try:
1432 return self._rust_status(
1437 return self._rust_status(
1433 match, listclean, listignored, listunknown
1438 match, listclean, listignored, listunknown
1434 )
1439 )
1435 except rustmod.FallbackError:
1440 except rustmod.FallbackError:
1436 pass
1441 pass
1437
1442
1438 def noop(f):
1443 def noop(f):
1439 pass
1444 pass
1440
1445
1441 dcontains = dmap.__contains__
1446 dcontains = dmap.__contains__
1442 dget = dmap.__getitem__
1447 dget = dmap.__getitem__
1443 ladd = lookup.append # aka "unsure"
1448 ladd = lookup.append # aka "unsure"
1444 madd = modified.append
1449 madd = modified.append
1445 aadd = added.append
1450 aadd = added.append
1446 uadd = unknown.append if listunknown else noop
1451 uadd = unknown.append if listunknown else noop
1447 iadd = ignored.append if listignored else noop
1452 iadd = ignored.append if listignored else noop
1448 radd = removed.append
1453 radd = removed.append
1449 dadd = deleted.append
1454 dadd = deleted.append
1450 cadd = clean.append if listclean else noop
1455 cadd = clean.append if listclean else noop
1451 mexact = match.exact
1456 mexact = match.exact
1452 dirignore = self._dirignore
1457 dirignore = self._dirignore
1453 checkexec = self._checkexec
1458 checkexec = self._checkexec
1454 copymap = self._map.copymap
1459 copymap = self._map.copymap
1455 lastnormaltime = self._lastnormaltime
1460 lastnormaltime = self._lastnormaltime
1456
1461
1457 # We need to do full walks when either
1462 # We need to do full walks when either
1458 # - we're listing all clean files, or
1463 # - we're listing all clean files, or
1459 # - match.traversedir does something, because match.traversedir should
1464 # - match.traversedir does something, because match.traversedir should
1460 # be called for every dir in the working dir
1465 # be called for every dir in the working dir
1461 full = listclean or match.traversedir is not None
1466 full = listclean or match.traversedir is not None
1462 for fn, st in pycompat.iteritems(
1467 for fn, st in pycompat.iteritems(
1463 self.walk(match, subrepos, listunknown, listignored, full=full)
1468 self.walk(match, subrepos, listunknown, listignored, full=full)
1464 ):
1469 ):
1465 if not dcontains(fn):
1470 if not dcontains(fn):
1466 if (listignored or mexact(fn)) and dirignore(fn):
1471 if (listignored or mexact(fn)) and dirignore(fn):
1467 if listignored:
1472 if listignored:
1468 iadd(fn)
1473 iadd(fn)
1469 else:
1474 else:
1470 uadd(fn)
1475 uadd(fn)
1471 continue
1476 continue
1472
1477
1473 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1478 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1474 # written like that for performance reasons. dmap[fn] is not a
1479 # written like that for performance reasons. dmap[fn] is not a
1475 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1480 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1476 # opcode has fast paths when the value to be unpacked is a tuple or
1481 # opcode has fast paths when the value to be unpacked is a tuple or
1477 # a list, but falls back to creating a full-fledged iterator in
1482 # a list, but falls back to creating a full-fledged iterator in
1478 # general. That is much slower than simply accessing and storing the
1483 # general. That is much slower than simply accessing and storing the
1479 # tuple members one by one.
1484 # tuple members one by one.
1480 t = dget(fn)
1485 t = dget(fn)
1481 mode = t.mode
1486 mode = t.mode
1482 size = t.size
1487 size = t.size
1483 time = t.mtime
1488 time = t.mtime
1484
1489
1485 if not st and t.tracked:
1490 if not st and t.tracked:
1486 dadd(fn)
1491 dadd(fn)
1487 elif t.merged:
1492 elif t.merged:
1488 madd(fn)
1493 madd(fn)
1489 elif t.added:
1494 elif t.added:
1490 aadd(fn)
1495 aadd(fn)
1491 elif t.removed:
1496 elif t.removed:
1492 radd(fn)
1497 radd(fn)
1493 elif t.tracked:
1498 elif t.tracked:
1494 if (
1499 if (
1495 size >= 0
1500 size >= 0
1496 and (
1501 and (
1497 (size != st.st_size and size != st.st_size & _rangemask)
1502 (size != st.st_size and size != st.st_size & _rangemask)
1498 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1503 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1499 )
1504 )
1500 or t.from_p2
1505 or t.from_p2
1501 or fn in copymap
1506 or fn in copymap
1502 ):
1507 ):
1503 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1508 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1504 # issue6456: Size returned may be longer due to
1509 # issue6456: Size returned may be longer due to
1505 # encryption on EXT-4 fscrypt, undecided.
1510 # encryption on EXT-4 fscrypt, undecided.
1506 ladd(fn)
1511 ladd(fn)
1507 else:
1512 else:
1508 madd(fn)
1513 madd(fn)
1509 elif (
1514 elif (
1510 time != st[stat.ST_MTIME]
1515 time != st[stat.ST_MTIME]
1511 and time != st[stat.ST_MTIME] & _rangemask
1516 and time != st[stat.ST_MTIME] & _rangemask
1512 ):
1517 ):
1513 ladd(fn)
1518 ladd(fn)
1514 elif st[stat.ST_MTIME] == lastnormaltime:
1519 elif st[stat.ST_MTIME] == lastnormaltime:
1515 # fn may have just been marked as normal and it may have
1520 # fn may have just been marked as normal and it may have
1516 # changed in the same second without changing its size.
1521 # changed in the same second without changing its size.
1517 # This can happen if we quickly do multiple commits.
1522 # This can happen if we quickly do multiple commits.
1518 # Force lookup, so we don't miss such a racy file change.
1523 # Force lookup, so we don't miss such a racy file change.
1519 ladd(fn)
1524 ladd(fn)
1520 elif listclean:
1525 elif listclean:
1521 cadd(fn)
1526 cadd(fn)
1522 status = scmutil.status(
1527 status = scmutil.status(
1523 modified, added, removed, deleted, unknown, ignored, clean
1528 modified, added, removed, deleted, unknown, ignored, clean
1524 )
1529 )
1525 return (lookup, status)
1530 return (lookup, status)
1526
1531
1527 def matches(self, match):
1532 def matches(self, match):
1528 """
1533 """
1529 return files in the dirstate (in whatever state) filtered by match
1534 return files in the dirstate (in whatever state) filtered by match
1530 """
1535 """
1531 dmap = self._map
1536 dmap = self._map
1532 if rustmod is not None:
1537 if rustmod is not None:
1533 dmap = self._map._rustmap
1538 dmap = self._map._rustmap
1534
1539
1535 if match.always():
1540 if match.always():
1536 return dmap.keys()
1541 return dmap.keys()
1537 files = match.files()
1542 files = match.files()
1538 if match.isexact():
1543 if match.isexact():
1539 # fast path -- filter the other way around, since typically files is
1544 # fast path -- filter the other way around, since typically files is
1540 # much smaller than dmap
1545 # much smaller than dmap
1541 return [f for f in files if f in dmap]
1546 return [f for f in files if f in dmap]
1542 if match.prefix() and all(fn in dmap for fn in files):
1547 if match.prefix() and all(fn in dmap for fn in files):
1543 # fast path -- all the values are known to be files, so just return
1548 # fast path -- all the values are known to be files, so just return
1544 # that
1549 # that
1545 return list(files)
1550 return list(files)
1546 return [f for f in dmap if match(f)]
1551 return [f for f in dmap if match(f)]
1547
1552
1548 def _actualfilename(self, tr):
1553 def _actualfilename(self, tr):
1549 if tr:
1554 if tr:
1550 return self._pendingfilename
1555 return self._pendingfilename
1551 else:
1556 else:
1552 return self._filename
1557 return self._filename
1553
1558
1554 def savebackup(self, tr, backupname):
1559 def savebackup(self, tr, backupname):
1555 '''Save current dirstate into backup file'''
1560 '''Save current dirstate into backup file'''
1556 filename = self._actualfilename(tr)
1561 filename = self._actualfilename(tr)
1557 assert backupname != filename
1562 assert backupname != filename
1558
1563
1559 # use '_writedirstate' instead of 'write' to write changes certainly,
1564 # use '_writedirstate' instead of 'write' to write changes certainly,
1560 # because the latter omits writing out if transaction is running.
1565 # because the latter omits writing out if transaction is running.
1561 # output file will be used to create backup of dirstate at this point.
1566 # output file will be used to create backup of dirstate at this point.
1562 if self._dirty or not self._opener.exists(filename):
1567 if self._dirty or not self._opener.exists(filename):
1563 self._writedirstate(
1568 self._writedirstate(
1564 tr,
1569 tr,
1565 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1570 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1566 )
1571 )
1567
1572
1568 if tr:
1573 if tr:
1569 # ensure that subsequent tr.writepending returns True for
1574 # ensure that subsequent tr.writepending returns True for
1570 # changes written out above, even if dirstate is never
1575 # changes written out above, even if dirstate is never
1571 # changed after this
1576 # changed after this
1572 tr.addfilegenerator(
1577 tr.addfilegenerator(
1573 b'dirstate',
1578 b'dirstate',
1574 (self._filename,),
1579 (self._filename,),
1575 lambda f: self._writedirstate(tr, f),
1580 lambda f: self._writedirstate(tr, f),
1576 location=b'plain',
1581 location=b'plain',
1577 )
1582 )
1578
1583
1579 # ensure that pending file written above is unlinked at
1584 # ensure that pending file written above is unlinked at
1580 # failure, even if tr.writepending isn't invoked until the
1585 # failure, even if tr.writepending isn't invoked until the
1581 # end of this transaction
1586 # end of this transaction
1582 tr.registertmp(filename, location=b'plain')
1587 tr.registertmp(filename, location=b'plain')
1583
1588
1584 self._opener.tryunlink(backupname)
1589 self._opener.tryunlink(backupname)
1585 # hardlink backup is okay because _writedirstate is always called
1590 # hardlink backup is okay because _writedirstate is always called
1586 # with an "atomictemp=True" file.
1591 # with an "atomictemp=True" file.
1587 util.copyfile(
1592 util.copyfile(
1588 self._opener.join(filename),
1593 self._opener.join(filename),
1589 self._opener.join(backupname),
1594 self._opener.join(backupname),
1590 hardlink=True,
1595 hardlink=True,
1591 )
1596 )
1592
1597
1593 def restorebackup(self, tr, backupname):
1598 def restorebackup(self, tr, backupname):
1594 '''Restore dirstate by backup file'''
1599 '''Restore dirstate by backup file'''
1595 # this "invalidate()" prevents "wlock.release()" from writing
1600 # this "invalidate()" prevents "wlock.release()" from writing
1596 # changes of dirstate out after restoring from backup file
1601 # changes of dirstate out after restoring from backup file
1597 self.invalidate()
1602 self.invalidate()
1598 filename = self._actualfilename(tr)
1603 filename = self._actualfilename(tr)
1599 o = self._opener
1604 o = self._opener
1600 if util.samefile(o.join(backupname), o.join(filename)):
1605 if util.samefile(o.join(backupname), o.join(filename)):
1601 o.unlink(backupname)
1606 o.unlink(backupname)
1602 else:
1607 else:
1603 o.rename(backupname, filename, checkambig=True)
1608 o.rename(backupname, filename, checkambig=True)
1604
1609
1605 def clearbackup(self, tr, backupname):
1610 def clearbackup(self, tr, backupname):
1606 '''Clear backup file'''
1611 '''Clear backup file'''
1607 self._opener.unlink(backupname)
1612 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now