##// END OF EJS Templates
dirstate: deprecate the `add` method outside of update/merge context...
marmoute -
r48461:b2082426 default
parent child Browse files
Show More
@@ -1,1609 +1,1616 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
# C/Rust accelerated modules: `parsers` always resolves (falling back to the
# pure-Python implementation), while `rustmod` is None when the Rust
# extension is unavailable.
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# The dirstate-v2 on-disk format is only available with the Rust extension.
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# Short local aliases for frequently used helpers.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """A filecache whose cached files live under the repository's .hg/."""

    def join(self, obj, fname):
        # resolve fname against the .hg/ opener of the owning object
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """A filecache whose cached files live in the working-directory root."""

    def join(self, obj, fname):
        # resolve fname against the repository root
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator enforcing that *func* is called inside a parentchange context.

    Raises error.ProgrammingError when the dirstate is not currently in
    the middle of a parent change.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator enforcing that *func* is called OUTSIDE a parentchange context.

    Raises error.ProgrammingError when the dirstate is currently in the
    middle of a parent change.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True while the in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # nesting depth of active parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        # nesting is allowed; pendingparentchange() is true while the
        # counter is positive
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
    @property
    def _pl(self):
        """The parent nodes of the working directory, as stored in the map."""
        return self._map.parents()
207
207
    def hasdir(self, d):
        """Return True if *d* is a directory containing tracked files."""
        return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
    @propertycache
    def _checklink(self):
        """True when the filesystem under the repo root supports symlinks."""
        return util.checklink(self._root)
227
227
    @propertycache
    def _checkexec(self):
        """True when the filesystem under the repo root supports the exec bit."""
        return bool(util.checkexec(self._root))
231
231
    @propertycache
    def _checkcase(self):
        """True when the filesystem is case-insensitive (checked on .hg)."""
        return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
    def flagfunc(self, buildfallback):
        """Return a function mapping a path to its flags.

        The returned callable yields b'l' for a symlink, b'x' for an
        executable file and b'' otherwise.  When the filesystem cannot
        express symlinks and/or the exec bit, *buildfallback* is invoked
        once to obtain a fallback that supplies the missing flag(s).
        """
        if self._checklink and self._checkexec:
            # filesystem supports both: a single lstat answers everything

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    # missing/unreadable file: report no flags
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks supported, exec bit not: ask fallback for 'x'

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit supported, symlinks not: ask fallback for 'l'

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither supported: everything comes from the fallback
            return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
    def __contains__(self, key):
        """Return True if *key* has an entry in the dirstate map."""
        return key in self._map
338
338
    def __iter__(self):
        """Iterate over dirstate filenames in sorted order."""
        return iter(sorted(self._map))
341
341
    def items(self):
        """Iterate over (filename, entry) pairs (unsorted)."""
        return pycompat.iteritems(self._map)

    # Python-2 style alias
    iteritems = items
346
346
    def directories(self):
        """Return the directory entries recorded in the dirstate map."""
        return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
    def p1(self):
        """Return the validated first parent of the working directory."""
        return self._validate(self._pl[0])
355
355
    def p2(self):
        """Return the validated second parent of the working directory."""
        return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
    def branch(self):
        """Return the current branch name, converted to local encoding."""
        return encoding.tolocal(self._branch)
366
366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # we are leaving a merge state: clean up entries that only make
            # sense with two parents
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414
414
    def setbranch(self, branch):
        """Record *branch* (given in local encoding) as the current branch.

        The value is written atomically to the .hg/branch file; on any
        failure the partially written file is discarded and the error
        re-raised.
        """
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
    def copied(self, file):
        """Return the copy source of *file*, or None if it is not a copy."""
        return self._map.copymap.get(file, None)
461
461
    def copies(self):
        """Return the mapping of copy destinations to their sources."""
        return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of "update/merge" case. For
469 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True the file was previously untracked, False otherwise.
472 return True the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self.normallookup(filename)
479 self.normallookup(filename)
480 return True
480 return True
481 return False
481 return False
482
482
483 @requires_no_parents_change
483 @requires_no_parents_change
484 def set_untracked(self, filename):
484 def set_untracked(self, filename):
485 """a "public" method for generic code to mark a file as untracked
485 """a "public" method for generic code to mark a file as untracked
486
486
487 This function is to be called outside of "update/merge" case. For
487 This function is to be called outside of "update/merge" case. For
488 example by a command like `hg remove X`.
488 example by a command like `hg remove X`.
489
489
490 return True the file was previously tracked, False otherwise.
490 return True the file was previously tracked, False otherwise.
491 """
491 """
492 entry = self._map.get(filename)
492 entry = self._map.get(filename)
493 if entry is None:
493 if entry is None:
494 return False
494 return False
495 elif entry.added:
495 elif entry.added:
496 self._drop(filename)
496 self._drop(filename)
497 return True
497 return True
498 else:
498 else:
499 self._remove(filename)
499 self._remove(filename)
500 return True
500 return True
501
501
    @requires_parents_change
    def update_file_reference(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            self.normallookup(filename)
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            # tracked in the working copy only: mark added unless it
            # already is (entry cannot be None here, wc_tracked is True)
            if not entry.added:
                self._add(filename)
        elif p1_tracked and not wc_tracked:
            # tracked in the parent only: schedule removal unless already done
            if entry is None or not entry.removed:
                self._remove(filename)
        else:
            # all four (p1_tracked, wc_tracked) combinations are covered above
            assert False, 'unreachable'
539
539
540 @requires_parents_change
540 @requires_parents_change
541 def update_file(
541 def update_file(
542 self,
542 self,
543 filename,
543 filename,
544 wc_tracked,
544 wc_tracked,
545 p1_tracked,
545 p1_tracked,
546 p2_tracked=False,
546 p2_tracked=False,
547 merged=False,
547 merged=False,
548 clean_p1=False,
548 clean_p1=False,
549 clean_p2=False,
549 clean_p2=False,
550 possibly_dirty=False,
550 possibly_dirty=False,
551 ):
551 ):
552 """update the information about a file in the dirstate
552 """update the information about a file in the dirstate
553
553
554 This is to be called when the direstates parent changes to keep track
554 This is to be called when the direstates parent changes to keep track
555 of what is the file situation in regards to the working copy and its parent.
555 of what is the file situation in regards to the working copy and its parent.
556
556
557 This function must be called within a `dirstate.parentchange` context.
557 This function must be called within a `dirstate.parentchange` context.
558
558
559 note: the API is at an early stage and we might need to ajust it
559 note: the API is at an early stage and we might need to ajust it
560 depending of what information ends up being relevant and useful to
560 depending of what information ends up being relevant and useful to
561 other processing.
561 other processing.
562 """
562 """
563 if not self.pendingparentchange():
563 if not self.pendingparentchange():
564 msg = b'calling `update_file` outside of a parentchange context'
564 msg = b'calling `update_file` outside of a parentchange context'
565 raise error.ProgrammingError(msg)
565 raise error.ProgrammingError(msg)
566 if merged and (clean_p1 or clean_p2):
566 if merged and (clean_p1 or clean_p2):
567 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
567 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
568 raise error.ProgrammingError(msg)
568 raise error.ProgrammingError(msg)
569 assert not (merged and (clean_p1 or clean_p1))
569 assert not (merged and (clean_p1 or clean_p1))
570 if not (p1_tracked or p2_tracked or wc_tracked):
570 if not (p1_tracked or p2_tracked or wc_tracked):
571 self._drop(filename)
571 self._drop(filename)
572 elif merged:
572 elif merged:
573 assert wc_tracked
573 assert wc_tracked
574 if not self.in_merge:
574 if not self.in_merge:
575 self.normallookup(filename)
575 self.normallookup(filename)
576 self.otherparent(filename)
576 self.otherparent(filename)
577 elif not (p1_tracked or p2_tracked) and wc_tracked:
577 elif not (p1_tracked or p2_tracked) and wc_tracked:
578 self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
578 self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
579 self._map.copymap.pop(filename, None)
579 self._map.copymap.pop(filename, None)
580 elif (p1_tracked or p2_tracked) and not wc_tracked:
580 elif (p1_tracked or p2_tracked) and not wc_tracked:
581 self._remove(filename)
581 self._remove(filename)
582 elif clean_p2 and wc_tracked:
582 elif clean_p2 and wc_tracked:
583 assert p2_tracked
583 assert p2_tracked
584 self.otherparent(filename)
584 self.otherparent(filename)
585 elif not p1_tracked and p2_tracked and wc_tracked:
585 elif not p1_tracked and p2_tracked and wc_tracked:
586 self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
586 self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
587 self._map.copymap.pop(filename, None)
587 self._map.copymap.pop(filename, None)
588 elif possibly_dirty:
588 elif possibly_dirty:
589 self._addpath(filename, possibly_dirty=possibly_dirty)
589 self._addpath(filename, possibly_dirty=possibly_dirty)
590 elif wc_tracked:
590 elif wc_tracked:
591 self.normal(filename)
591 self.normal(filename)
592 # XXX We need something for file that are dirty after an update
592 # XXX We need something for file that are dirty after an update
593 else:
593 else:
594 assert False, 'unreachable'
594 assert False, 'unreachable'
595
595
    @requires_parents_change
    def update_parent_file_data(self, f, filedata):
        """update the information about the content of a file

        `filedata` is a (mode, size, mtime) triple, forwarded to
        `normal()` as its `parentfiledata` argument.

        This function should be called within a `dirstate.parentchange` context.
        """
        self.normal(f, parentfiledata=filedata)
603
603
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """Low-level helper recording file `f` in the dirstate map.

        The boolean flags select the dirstate state the entry ends up in
        (added / merged / from-p2 / possibly dirty); mode/size/mtime carry
        cached stat data for clean entries.  Raises error.Abort when `f`
        would clash with a tracked directory or an existing file entry.
        """
        entry = self._map.get(f)
        # Only validate the name when the file (re)enters the tracked set:
        # brand-new additions and entries resurrected from the removed state.
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            # a tracked directory cannot also be a file of the same name
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows: no ancestor path of `f` may be a tracked *file*
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
643
643
    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # no cached stat data supplied: stat the file ourselves
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        # a clean file cannot be a pending copy
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
670
670
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    # preserve any copy information before merge()/otherparent()
                    # clear it, then restore it afterwards
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in the desired merge state: nothing to do
                    return
        # default case: record as tracked but needing a content check
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
694
694
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if not self.in_merge:
            # only meaningful while a merge is in progress
            msg = _(b"setting %r to other parent only allowed in merges") % f
            raise error.Abort(msg)
        entry = self._map.get(f)
        if entry is not None and entry.tracked:
            # merge-like: file was already tracked, record it as merged
            self._addpath(f, merged=True)
        else:
            # add-like: file only exists in the other parent
            self._addpath(f, from_p2=True)
        self._map.copymap.pop(f, None)
708
708
    def add(self, f):
        '''Mark a file added.'''
        if not self.pendingparentchange():
            # deprecated since 6.0: outside of an update/merge context,
            # callers should use `set_tracked` instead
            util.nouideprecwarn(
                b"do not use `add` outside of update/merge context."
                b" Use `set_tracked`",
                b'6.0',
                stacklevel=2,
            )
        self._add(f)
712
719
    def _add(self, filename):
        """internal function to mark a file as added

        Also drops any pending copy record: _addpath() may raise
        error.Abort, so the pop intentionally happens only afterwards.
        """
        self._addpath(filename, added=True)
        self._map.copymap.pop(filename, None)
717
724
718 def remove(self, f):
725 def remove(self, f):
719 '''Mark a file removed'''
726 '''Mark a file removed'''
720 self._remove(f)
727 self._remove(f)
721
728
    def _remove(self, filename):
        """internal function to mark a file removed"""
        self._dirty = True
        self._updatedfiles.add(filename)
        # in_merge selects the removal flavor recorded by the map
        self._map.removefile(filename, in_merge=self.in_merge)
727
734
728 def merge(self, f):
735 def merge(self, f):
729 '''Mark a file merged.'''
736 '''Mark a file merged.'''
730 if not self.in_merge:
737 if not self.in_merge:
731 return self.normallookup(f)
738 return self.normallookup(f)
732 return self.otherparent(f)
739 return self.otherparent(f)
733
740
734 def drop(self, f):
741 def drop(self, f):
735 '''Drop a file from the dirstate'''
742 '''Drop a file from the dirstate'''
736 self._drop(f)
743 self._drop(f)
737
744
    def _drop(self, filename):
        """internal function to drop a file from the dirstate"""
        # dropfile returns whether an entry actually existed; only then
        # do we have state changes to record
        if self._map.dropfile(filename):
            self._dirty = True
            self._updatedfiles.add(filename)
            self._map.copymap.pop(filename, None)
744
751
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Resolve the on-disk case of `path` on a case-folding filesystem.

        `normed` is the case-normalized form of `path`; successful
        resolutions are cached in `storemap` (a fold map keyed by the
        normalized name).  `exists`, if not None, short-circuits the
        filesystem existence check.  Returns the case-folded path.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that exist on disk
            storemap[normed] = folded

        return folded
770
777
    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        """Case-normalize `path` against tracked *files* only.

        Consults the file fold map first; falls back to filesystem
        discovery unless `isknown` says the name came straight from a
        disk walk (in which case its case is already authoritative).
        """
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded
782
789
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Case-normalize `path` against tracked files *and* directories."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            # not a known file: maybe it is a known directory
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
798
805
    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        # on case-sensitive filesystems the path is already canonical
        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path
820
827
821 def clear(self):
828 def clear(self):
822 self._map.clear()
829 self._map.clear()
823 self._lastnormaltime = 0
830 self._lastnormaltime = 0
824 self._updatedfiles.clear()
831 self._updatedfiles.clear()
825 self._dirty = True
832 self._dirty = True
826
833
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to describe `parent` containing `allfiles`.

        When `changedfiles` is given, only those entries are refreshed
        (looked up if still in `allfiles`, dropped otherwise); otherwise
        the whole dirstate is rebuilt from scratch.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the rebuild
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        # remember the pre-rebuild parents so _writedirstate can notify
        # parent-change callbacks
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True
860
867
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
868
875
    def write(self, tr):
        """Persist pending dirstate changes.

        With a transaction `tr`, the actual write is delayed via a file
        generator; without one, the dirstate file is written immediately.
        No-op when nothing is dirty.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        # no transaction: write synchronously, guarding against
        # mtime ambiguity with checkambig
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
900
907
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
        dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
911
918
    def _writedirstate(self, st):
        """Serialize the dirstate map to the open file object `st`."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            # sorted for a deterministic callback order across runs
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
945
952
946 def _dirignore(self, f):
953 def _dirignore(self, f):
947 if self._ignore(f):
954 if self._ignore(f):
948 return True
955 return True
949 for p in pathutil.finddirs(f):
956 for p in pathutil.finddirs(f):
950 if self._ignore(p):
957 if self._ignore(p):
951 return True
958 return True
952 return False
959 return False
953
960
954 def _ignorefiles(self):
961 def _ignorefiles(self):
955 files = []
962 files = []
956 if os.path.exists(self._join(b'.hgignore')):
963 if os.path.exists(self._join(b'.hgignore')):
957 files.append(self._join(b'.hgignore'))
964 files.append(self._join(b'.hgignore'))
958 for name, path in self._ui.configitems(b"ui"):
965 for name, path in self._ui.configitems(b"ui"):
959 if name == b'ignore' or name.startswith(b'ignore.'):
966 if name == b'ignore' or name.startswith(b'ignore.'):
960 # we need to use os.path.join here rather than self._join
967 # we need to use os.path.join here rather than self._join
961 # because path is arbitrary and user-specified
968 # because path is arbitrary and user-specified
962 files.append(os.path.join(self._rootdir, util.expandpath(path)))
969 files.append(os.path.join(self._rootdir, util.expandpath(path)))
963 return files
970 return files
964
971
    def _ignorefileandline(self, f):
        """Return (file, lineno, line) of the first ignore pattern matching `f`.

        Walks every ignore file (following `subinclude` directives,
        breadth-first, each visited at most once) and returns
        (None, -1, b"") when no pattern matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced pattern file instead of matching
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
986
993
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # translate an unsupported stat mode into a user-facing message
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently-used lookups to locals for the loops below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop paths that live inside a subrepo: both lists are sorted, so a
        # single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group result paths by their case-normalized form
            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            # case mismatch with disk: drop the stat data
                            results[path] = None

        return results, dirsfound, dirsnotfound
1121
1128
1122 def walk(self, match, subrepos, unknown, ignored, full=True):
1129 def walk(self, match, subrepos, unknown, ignored, full=True):
1123 """
1130 """
1124 Walk recursively through the directory tree, finding all files
1131 Walk recursively through the directory tree, finding all files
1125 matched by match.
1132 matched by match.
1126
1133
1127 If full is False, maybe skip some known-clean files.
1134 If full is False, maybe skip some known-clean files.
1128
1135
1129 Return a dict mapping filename to stat-like object (either
1136 Return a dict mapping filename to stat-like object (either
1130 mercurial.osutil.stat instance or return value of os.stat()).
1137 mercurial.osutil.stat instance or return value of os.stat()).
1131
1138
1132 """
1139 """
1133 # full is a flag that extensions that hook into walk can use -- this
1140 # full is a flag that extensions that hook into walk can use -- this
1134 # implementation doesn't use it at all. This satisfies the contract
1141 # implementation doesn't use it at all. This satisfies the contract
1135 # because we only guarantee a "maybe".
1142 # because we only guarantee a "maybe".
1136
1143
1137 if ignored:
1144 if ignored:
1138 ignore = util.never
1145 ignore = util.never
1139 dirignore = util.never
1146 dirignore = util.never
1140 elif unknown:
1147 elif unknown:
1141 ignore = self._ignore
1148 ignore = self._ignore
1142 dirignore = self._dirignore
1149 dirignore = self._dirignore
1143 else:
1150 else:
1144 # if not unknown and not ignored, drop dir recursion and step 2
1151 # if not unknown and not ignored, drop dir recursion and step 2
1145 ignore = util.always
1152 ignore = util.always
1146 dirignore = util.always
1153 dirignore = util.always
1147
1154
1148 matchfn = match.matchfn
1155 matchfn = match.matchfn
1149 matchalways = match.always()
1156 matchalways = match.always()
1150 matchtdir = match.traversedir
1157 matchtdir = match.traversedir
1151 dmap = self._map
1158 dmap = self._map
1152 listdir = util.listdir
1159 listdir = util.listdir
1153 lstat = os.lstat
1160 lstat = os.lstat
1154 dirkind = stat.S_IFDIR
1161 dirkind = stat.S_IFDIR
1155 regkind = stat.S_IFREG
1162 regkind = stat.S_IFREG
1156 lnkkind = stat.S_IFLNK
1163 lnkkind = stat.S_IFLNK
1157 join = self._join
1164 join = self._join
1158
1165
1159 exact = skipstep3 = False
1166 exact = skipstep3 = False
1160 if match.isexact(): # match.exact
1167 if match.isexact(): # match.exact
1161 exact = True
1168 exact = True
1162 dirignore = util.always # skip step 2
1169 dirignore = util.always # skip step 2
1163 elif match.prefix(): # match.match, no patterns
1170 elif match.prefix(): # match.match, no patterns
1164 skipstep3 = True
1171 skipstep3 = True
1165
1172
1166 if not exact and self._checkcase:
1173 if not exact and self._checkcase:
1167 normalize = self._normalize
1174 normalize = self._normalize
1168 normalizefile = self._normalizefile
1175 normalizefile = self._normalizefile
1169 skipstep3 = False
1176 skipstep3 = False
1170 else:
1177 else:
1171 normalize = self._normalize
1178 normalize = self._normalize
1172 normalizefile = None
1179 normalizefile = None
1173
1180
1174 # step 1: find all explicit files
1181 # step 1: find all explicit files
1175 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1182 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1176 if matchtdir:
1183 if matchtdir:
1177 for d in work:
1184 for d in work:
1178 matchtdir(d[0])
1185 matchtdir(d[0])
1179 for d in dirsnotfound:
1186 for d in dirsnotfound:
1180 matchtdir(d)
1187 matchtdir(d)
1181
1188
1182 skipstep3 = skipstep3 and not (work or dirsnotfound)
1189 skipstep3 = skipstep3 and not (work or dirsnotfound)
1183 work = [d for d in work if not dirignore(d[0])]
1190 work = [d for d in work if not dirignore(d[0])]
1184
1191
1185 # step 2: visit subdirectories
1192 # step 2: visit subdirectories
1186 def traverse(work, alreadynormed):
1193 def traverse(work, alreadynormed):
1187 wadd = work.append
1194 wadd = work.append
1188 while work:
1195 while work:
1189 tracing.counter('dirstate.walk work', len(work))
1196 tracing.counter('dirstate.walk work', len(work))
1190 nd = work.pop()
1197 nd = work.pop()
1191 visitentries = match.visitchildrenset(nd)
1198 visitentries = match.visitchildrenset(nd)
1192 if not visitentries:
1199 if not visitentries:
1193 continue
1200 continue
1194 if visitentries == b'this' or visitentries == b'all':
1201 if visitentries == b'this' or visitentries == b'all':
1195 visitentries = None
1202 visitentries = None
1196 skip = None
1203 skip = None
1197 if nd != b'':
1204 if nd != b'':
1198 skip = b'.hg'
1205 skip = b'.hg'
1199 try:
1206 try:
1200 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1207 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1201 entries = listdir(join(nd), stat=True, skip=skip)
1208 entries = listdir(join(nd), stat=True, skip=skip)
1202 except OSError as inst:
1209 except OSError as inst:
1203 if inst.errno in (errno.EACCES, errno.ENOENT):
1210 if inst.errno in (errno.EACCES, errno.ENOENT):
1204 match.bad(
1211 match.bad(
1205 self.pathto(nd), encoding.strtolocal(inst.strerror)
1212 self.pathto(nd), encoding.strtolocal(inst.strerror)
1206 )
1213 )
1207 continue
1214 continue
1208 raise
1215 raise
1209 for f, kind, st in entries:
1216 for f, kind, st in entries:
1210 # Some matchers may return files in the visitentries set,
1217 # Some matchers may return files in the visitentries set,
1211 # instead of 'this', if the matcher explicitly mentions them
1218 # instead of 'this', if the matcher explicitly mentions them
1212 # and is not an exactmatcher. This is acceptable; we do not
1219 # and is not an exactmatcher. This is acceptable; we do not
1213 # make any hard assumptions about file-or-directory below
1220 # make any hard assumptions about file-or-directory below
1214 # based on the presence of `f` in visitentries. If
1221 # based on the presence of `f` in visitentries. If
1215 # visitchildrenset returned a set, we can always skip the
1222 # visitchildrenset returned a set, we can always skip the
1216 # entries *not* in the set it provided regardless of whether
1223 # entries *not* in the set it provided regardless of whether
1217 # they're actually a file or a directory.
1224 # they're actually a file or a directory.
1218 if visitentries and f not in visitentries:
1225 if visitentries and f not in visitentries:
1219 continue
1226 continue
1220 if normalizefile:
1227 if normalizefile:
1221 # even though f might be a directory, we're only
1228 # even though f might be a directory, we're only
1222 # interested in comparing it to files currently in the
1229 # interested in comparing it to files currently in the
1223 # dmap -- therefore normalizefile is enough
1230 # dmap -- therefore normalizefile is enough
1224 nf = normalizefile(
1231 nf = normalizefile(
1225 nd and (nd + b"/" + f) or f, True, True
1232 nd and (nd + b"/" + f) or f, True, True
1226 )
1233 )
1227 else:
1234 else:
1228 nf = nd and (nd + b"/" + f) or f
1235 nf = nd and (nd + b"/" + f) or f
1229 if nf not in results:
1236 if nf not in results:
1230 if kind == dirkind:
1237 if kind == dirkind:
1231 if not ignore(nf):
1238 if not ignore(nf):
1232 if matchtdir:
1239 if matchtdir:
1233 matchtdir(nf)
1240 matchtdir(nf)
1234 wadd(nf)
1241 wadd(nf)
1235 if nf in dmap and (matchalways or matchfn(nf)):
1242 if nf in dmap and (matchalways or matchfn(nf)):
1236 results[nf] = None
1243 results[nf] = None
1237 elif kind == regkind or kind == lnkkind:
1244 elif kind == regkind or kind == lnkkind:
1238 if nf in dmap:
1245 if nf in dmap:
1239 if matchalways or matchfn(nf):
1246 if matchalways or matchfn(nf):
1240 results[nf] = st
1247 results[nf] = st
1241 elif (matchalways or matchfn(nf)) and not ignore(
1248 elif (matchalways or matchfn(nf)) and not ignore(
1242 nf
1249 nf
1243 ):
1250 ):
1244 # unknown file -- normalize if necessary
1251 # unknown file -- normalize if necessary
1245 if not alreadynormed:
1252 if not alreadynormed:
1246 nf = normalize(nf, False, True)
1253 nf = normalize(nf, False, True)
1247 results[nf] = st
1254 results[nf] = st
1248 elif nf in dmap and (matchalways or matchfn(nf)):
1255 elif nf in dmap and (matchalways or matchfn(nf)):
1249 results[nf] = None
1256 results[nf] = None
1250
1257
1251 for nd, d in work:
1258 for nd, d in work:
1252 # alreadynormed means that processwork doesn't have to do any
1259 # alreadynormed means that processwork doesn't have to do any
1253 # expensive directory normalization
1260 # expensive directory normalization
1254 alreadynormed = not normalize or nd == d
1261 alreadynormed = not normalize or nd == d
1255 traverse([d], alreadynormed)
1262 traverse([d], alreadynormed)
1256
1263
1257 for s in subrepos:
1264 for s in subrepos:
1258 del results[s]
1265 del results[s]
1259 del results[b'.hg']
1266 del results[b'.hg']
1260
1267
1261 # step 3: visit remaining files from dmap
1268 # step 3: visit remaining files from dmap
1262 if not skipstep3 and not exact:
1269 if not skipstep3 and not exact:
1263 # If a dmap file is not in results yet, it was either
1270 # If a dmap file is not in results yet, it was either
1264 # a) not matching matchfn b) ignored, c) missing, or d) under a
1271 # a) not matching matchfn b) ignored, c) missing, or d) under a
1265 # symlink directory.
1272 # symlink directory.
1266 if not results and matchalways:
1273 if not results and matchalways:
1267 visit = [f for f in dmap]
1274 visit = [f for f in dmap]
1268 else:
1275 else:
1269 visit = [f for f in dmap if f not in results and matchfn(f)]
1276 visit = [f for f in dmap if f not in results and matchfn(f)]
1270 visit.sort()
1277 visit.sort()
1271
1278
1272 if unknown:
1279 if unknown:
1273 # unknown == True means we walked all dirs under the roots
1280 # unknown == True means we walked all dirs under the roots
1274 # that wasn't ignored, and everything that matched was stat'ed
1281 # that wasn't ignored, and everything that matched was stat'ed
1275 # and is already in results.
1282 # and is already in results.
1276 # The rest must thus be ignored or under a symlink.
1283 # The rest must thus be ignored or under a symlink.
1277 audit_path = pathutil.pathauditor(self._root, cached=True)
1284 audit_path = pathutil.pathauditor(self._root, cached=True)
1278
1285
1279 for nf in iter(visit):
1286 for nf in iter(visit):
1280 # If a stat for the same file was already added with a
1287 # If a stat for the same file was already added with a
1281 # different case, don't add one for this, since that would
1288 # different case, don't add one for this, since that would
1282 # make it appear as if the file exists under both names
1289 # make it appear as if the file exists under both names
1283 # on disk.
1290 # on disk.
1284 if (
1291 if (
1285 normalizefile
1292 normalizefile
1286 and normalizefile(nf, True, True) in results
1293 and normalizefile(nf, True, True) in results
1287 ):
1294 ):
1288 results[nf] = None
1295 results[nf] = None
1289 # Report ignored items in the dmap as long as they are not
1296 # Report ignored items in the dmap as long as they are not
1290 # under a symlink directory.
1297 # under a symlink directory.
1291 elif audit_path.check(nf):
1298 elif audit_path.check(nf):
1292 try:
1299 try:
1293 results[nf] = lstat(join(nf))
1300 results[nf] = lstat(join(nf))
1294 # file was just ignored, no links, and exists
1301 # file was just ignored, no links, and exists
1295 except OSError:
1302 except OSError:
1296 # file doesn't exist
1303 # file doesn't exist
1297 results[nf] = None
1304 results[nf] = None
1298 else:
1305 else:
1299 # It's either missing or under a symlink directory
1306 # It's either missing or under a symlink directory
1300 # which we in this case report as missing
1307 # which we in this case report as missing
1301 results[nf] = None
1308 results[nf] = None
1302 else:
1309 else:
1303 # We may not have walked the full directory tree above,
1310 # We may not have walked the full directory tree above,
1304 # so stat and check everything we missed.
1311 # so stat and check everything we missed.
1305 iv = iter(visit)
1312 iv = iter(visit)
1306 for st in util.statfiles([join(i) for i in visit]):
1313 for st in util.statfiles([join(i) for i in visit]):
1307 results[next(iv)] = st
1314 results[next(iv)] = st
1308 return results
1315 return results
1309
1316
1310 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1317 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1311 # Force Rayon (Rust parallelism library) to respect the number of
1318 # Force Rayon (Rust parallelism library) to respect the number of
1312 # workers. This is a temporary workaround until Rust code knows
1319 # workers. This is a temporary workaround until Rust code knows
1313 # how to read the config file.
1320 # how to read the config file.
1314 numcpus = self._ui.configint(b"worker", b"numcpus")
1321 numcpus = self._ui.configint(b"worker", b"numcpus")
1315 if numcpus is not None:
1322 if numcpus is not None:
1316 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1323 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1317
1324
1318 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1325 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1319 if not workers_enabled:
1326 if not workers_enabled:
1320 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1327 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1321
1328
1322 (
1329 (
1323 lookup,
1330 lookup,
1324 modified,
1331 modified,
1325 added,
1332 added,
1326 removed,
1333 removed,
1327 deleted,
1334 deleted,
1328 clean,
1335 clean,
1329 ignored,
1336 ignored,
1330 unknown,
1337 unknown,
1331 warnings,
1338 warnings,
1332 bad,
1339 bad,
1333 traversed,
1340 traversed,
1334 dirty,
1341 dirty,
1335 ) = rustmod.status(
1342 ) = rustmod.status(
1336 self._map._rustmap,
1343 self._map._rustmap,
1337 matcher,
1344 matcher,
1338 self._rootdir,
1345 self._rootdir,
1339 self._ignorefiles(),
1346 self._ignorefiles(),
1340 self._checkexec,
1347 self._checkexec,
1341 self._lastnormaltime,
1348 self._lastnormaltime,
1342 bool(list_clean),
1349 bool(list_clean),
1343 bool(list_ignored),
1350 bool(list_ignored),
1344 bool(list_unknown),
1351 bool(list_unknown),
1345 bool(matcher.traversedir),
1352 bool(matcher.traversedir),
1346 )
1353 )
1347
1354
1348 self._dirty |= dirty
1355 self._dirty |= dirty
1349
1356
1350 if matcher.traversedir:
1357 if matcher.traversedir:
1351 for dir in traversed:
1358 for dir in traversed:
1352 matcher.traversedir(dir)
1359 matcher.traversedir(dir)
1353
1360
1354 if self._ui.warn:
1361 if self._ui.warn:
1355 for item in warnings:
1362 for item in warnings:
1356 if isinstance(item, tuple):
1363 if isinstance(item, tuple):
1357 file_path, syntax = item
1364 file_path, syntax = item
1358 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1365 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1359 file_path,
1366 file_path,
1360 syntax,
1367 syntax,
1361 )
1368 )
1362 self._ui.warn(msg)
1369 self._ui.warn(msg)
1363 else:
1370 else:
1364 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1371 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1365 self._ui.warn(
1372 self._ui.warn(
1366 msg
1373 msg
1367 % (
1374 % (
1368 pathutil.canonpath(
1375 pathutil.canonpath(
1369 self._rootdir, self._rootdir, item
1376 self._rootdir, self._rootdir, item
1370 ),
1377 ),
1371 b"No such file or directory",
1378 b"No such file or directory",
1372 )
1379 )
1373 )
1380 )
1374
1381
1375 for (fn, message) in bad:
1382 for (fn, message) in bad:
1376 matcher.bad(fn, encoding.strtolocal(message))
1383 matcher.bad(fn, encoding.strtolocal(message))
1377
1384
1378 status = scmutil.status(
1385 status = scmutil.status(
1379 modified=modified,
1386 modified=modified,
1380 added=added,
1387 added=added,
1381 removed=removed,
1388 removed=removed,
1382 deleted=deleted,
1389 deleted=deleted,
1383 unknown=unknown,
1390 unknown=unknown,
1384 ignored=ignored,
1391 ignored=ignored,
1385 clean=clean,
1392 clean=clean,
1386 )
1393 )
1387 return (lookup, status)
1394 return (lookup, status)
1388
1395
1389 def status(self, match, subrepos, ignored, clean, unknown):
1396 def status(self, match, subrepos, ignored, clean, unknown):
1390 """Determine the status of the working copy relative to the
1397 """Determine the status of the working copy relative to the
1391 dirstate and return a pair of (unsure, status), where status is of type
1398 dirstate and return a pair of (unsure, status), where status is of type
1392 scmutil.status and:
1399 scmutil.status and:
1393
1400
1394 unsure:
1401 unsure:
1395 files that might have been modified since the dirstate was
1402 files that might have been modified since the dirstate was
1396 written, but need to be read to be sure (size is the same
1403 written, but need to be read to be sure (size is the same
1397 but mtime differs)
1404 but mtime differs)
1398 status.modified:
1405 status.modified:
1399 files that have definitely been modified since the dirstate
1406 files that have definitely been modified since the dirstate
1400 was written (different size or mode)
1407 was written (different size or mode)
1401 status.clean:
1408 status.clean:
1402 files that have definitely not been modified since the
1409 files that have definitely not been modified since the
1403 dirstate was written
1410 dirstate was written
1404 """
1411 """
1405 listignored, listclean, listunknown = ignored, clean, unknown
1412 listignored, listclean, listunknown = ignored, clean, unknown
1406 lookup, modified, added, unknown, ignored = [], [], [], [], []
1413 lookup, modified, added, unknown, ignored = [], [], [], [], []
1407 removed, deleted, clean = [], [], []
1414 removed, deleted, clean = [], [], []
1408
1415
1409 dmap = self._map
1416 dmap = self._map
1410 dmap.preload()
1417 dmap.preload()
1411
1418
1412 use_rust = True
1419 use_rust = True
1413
1420
1414 allowed_matchers = (
1421 allowed_matchers = (
1415 matchmod.alwaysmatcher,
1422 matchmod.alwaysmatcher,
1416 matchmod.exactmatcher,
1423 matchmod.exactmatcher,
1417 matchmod.includematcher,
1424 matchmod.includematcher,
1418 )
1425 )
1419
1426
1420 if rustmod is None:
1427 if rustmod is None:
1421 use_rust = False
1428 use_rust = False
1422 elif self._checkcase:
1429 elif self._checkcase:
1423 # Case-insensitive filesystems are not handled yet
1430 # Case-insensitive filesystems are not handled yet
1424 use_rust = False
1431 use_rust = False
1425 elif subrepos:
1432 elif subrepos:
1426 use_rust = False
1433 use_rust = False
1427 elif sparse.enabled:
1434 elif sparse.enabled:
1428 use_rust = False
1435 use_rust = False
1429 elif not isinstance(match, allowed_matchers):
1436 elif not isinstance(match, allowed_matchers):
1430 # Some matchers have yet to be implemented
1437 # Some matchers have yet to be implemented
1431 use_rust = False
1438 use_rust = False
1432
1439
1433 if use_rust:
1440 if use_rust:
1434 try:
1441 try:
1435 return self._rust_status(
1442 return self._rust_status(
1436 match, listclean, listignored, listunknown
1443 match, listclean, listignored, listunknown
1437 )
1444 )
1438 except rustmod.FallbackError:
1445 except rustmod.FallbackError:
1439 pass
1446 pass
1440
1447
1441 def noop(f):
1448 def noop(f):
1442 pass
1449 pass
1443
1450
1444 dcontains = dmap.__contains__
1451 dcontains = dmap.__contains__
1445 dget = dmap.__getitem__
1452 dget = dmap.__getitem__
1446 ladd = lookup.append # aka "unsure"
1453 ladd = lookup.append # aka "unsure"
1447 madd = modified.append
1454 madd = modified.append
1448 aadd = added.append
1455 aadd = added.append
1449 uadd = unknown.append if listunknown else noop
1456 uadd = unknown.append if listunknown else noop
1450 iadd = ignored.append if listignored else noop
1457 iadd = ignored.append if listignored else noop
1451 radd = removed.append
1458 radd = removed.append
1452 dadd = deleted.append
1459 dadd = deleted.append
1453 cadd = clean.append if listclean else noop
1460 cadd = clean.append if listclean else noop
1454 mexact = match.exact
1461 mexact = match.exact
1455 dirignore = self._dirignore
1462 dirignore = self._dirignore
1456 checkexec = self._checkexec
1463 checkexec = self._checkexec
1457 copymap = self._map.copymap
1464 copymap = self._map.copymap
1458 lastnormaltime = self._lastnormaltime
1465 lastnormaltime = self._lastnormaltime
1459
1466
1460 # We need to do full walks when either
1467 # We need to do full walks when either
1461 # - we're listing all clean files, or
1468 # - we're listing all clean files, or
1462 # - match.traversedir does something, because match.traversedir should
1469 # - match.traversedir does something, because match.traversedir should
1463 # be called for every dir in the working dir
1470 # be called for every dir in the working dir
1464 full = listclean or match.traversedir is not None
1471 full = listclean or match.traversedir is not None
1465 for fn, st in pycompat.iteritems(
1472 for fn, st in pycompat.iteritems(
1466 self.walk(match, subrepos, listunknown, listignored, full=full)
1473 self.walk(match, subrepos, listunknown, listignored, full=full)
1467 ):
1474 ):
1468 if not dcontains(fn):
1475 if not dcontains(fn):
1469 if (listignored or mexact(fn)) and dirignore(fn):
1476 if (listignored or mexact(fn)) and dirignore(fn):
1470 if listignored:
1477 if listignored:
1471 iadd(fn)
1478 iadd(fn)
1472 else:
1479 else:
1473 uadd(fn)
1480 uadd(fn)
1474 continue
1481 continue
1475
1482
1476 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1483 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1477 # written like that for performance reasons. dmap[fn] is not a
1484 # written like that for performance reasons. dmap[fn] is not a
1478 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1485 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1479 # opcode has fast paths when the value to be unpacked is a tuple or
1486 # opcode has fast paths when the value to be unpacked is a tuple or
1480 # a list, but falls back to creating a full-fledged iterator in
1487 # a list, but falls back to creating a full-fledged iterator in
1481 # general. That is much slower than simply accessing and storing the
1488 # general. That is much slower than simply accessing and storing the
1482 # tuple members one by one.
1489 # tuple members one by one.
1483 t = dget(fn)
1490 t = dget(fn)
1484 mode = t.mode
1491 mode = t.mode
1485 size = t.size
1492 size = t.size
1486 time = t.mtime
1493 time = t.mtime
1487
1494
1488 if not st and t.tracked:
1495 if not st and t.tracked:
1489 dadd(fn)
1496 dadd(fn)
1490 elif t.merged:
1497 elif t.merged:
1491 madd(fn)
1498 madd(fn)
1492 elif t.added:
1499 elif t.added:
1493 aadd(fn)
1500 aadd(fn)
1494 elif t.removed:
1501 elif t.removed:
1495 radd(fn)
1502 radd(fn)
1496 elif t.tracked:
1503 elif t.tracked:
1497 if (
1504 if (
1498 size >= 0
1505 size >= 0
1499 and (
1506 and (
1500 (size != st.st_size and size != st.st_size & _rangemask)
1507 (size != st.st_size and size != st.st_size & _rangemask)
1501 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1508 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1502 )
1509 )
1503 or t.from_p2
1510 or t.from_p2
1504 or fn in copymap
1511 or fn in copymap
1505 ):
1512 ):
1506 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1513 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1507 # issue6456: Size returned may be longer due to
1514 # issue6456: Size returned may be longer due to
1508 # encryption on EXT-4 fscrypt, undecided.
1515 # encryption on EXT-4 fscrypt, undecided.
1509 ladd(fn)
1516 ladd(fn)
1510 else:
1517 else:
1511 madd(fn)
1518 madd(fn)
1512 elif (
1519 elif (
1513 time != st[stat.ST_MTIME]
1520 time != st[stat.ST_MTIME]
1514 and time != st[stat.ST_MTIME] & _rangemask
1521 and time != st[stat.ST_MTIME] & _rangemask
1515 ):
1522 ):
1516 ladd(fn)
1523 ladd(fn)
1517 elif st[stat.ST_MTIME] == lastnormaltime:
1524 elif st[stat.ST_MTIME] == lastnormaltime:
1518 # fn may have just been marked as normal and it may have
1525 # fn may have just been marked as normal and it may have
1519 # changed in the same second without changing its size.
1526 # changed in the same second without changing its size.
1520 # This can happen if we quickly do multiple commits.
1527 # This can happen if we quickly do multiple commits.
1521 # Force lookup, so we don't miss such a racy file change.
1528 # Force lookup, so we don't miss such a racy file change.
1522 ladd(fn)
1529 ladd(fn)
1523 elif listclean:
1530 elif listclean:
1524 cadd(fn)
1531 cadd(fn)
1525 status = scmutil.status(
1532 status = scmutil.status(
1526 modified, added, removed, deleted, unknown, ignored, clean
1533 modified, added, removed, deleted, unknown, ignored, clean
1527 )
1534 )
1528 return (lookup, status)
1535 return (lookup, status)
1529
1536
1530 def matches(self, match):
1537 def matches(self, match):
1531 """
1538 """
1532 return files in the dirstate (in whatever state) filtered by match
1539 return files in the dirstate (in whatever state) filtered by match
1533 """
1540 """
1534 dmap = self._map
1541 dmap = self._map
1535 if rustmod is not None:
1542 if rustmod is not None:
1536 dmap = self._map._rustmap
1543 dmap = self._map._rustmap
1537
1544
1538 if match.always():
1545 if match.always():
1539 return dmap.keys()
1546 return dmap.keys()
1540 files = match.files()
1547 files = match.files()
1541 if match.isexact():
1548 if match.isexact():
1542 # fast path -- filter the other way around, since typically files is
1549 # fast path -- filter the other way around, since typically files is
1543 # much smaller than dmap
1550 # much smaller than dmap
1544 return [f for f in files if f in dmap]
1551 return [f for f in files if f in dmap]
1545 if match.prefix() and all(fn in dmap for fn in files):
1552 if match.prefix() and all(fn in dmap for fn in files):
1546 # fast path -- all the values are known to be files, so just return
1553 # fast path -- all the values are known to be files, so just return
1547 # that
1554 # that
1548 return list(files)
1555 return list(files)
1549 return [f for f in dmap if match(f)]
1556 return [f for f in dmap if match(f)]
1550
1557
1551 def _actualfilename(self, tr):
1558 def _actualfilename(self, tr):
1552 if tr:
1559 if tr:
1553 return self._pendingfilename
1560 return self._pendingfilename
1554 else:
1561 else:
1555 return self._filename
1562 return self._filename
1556
1563
1557 def savebackup(self, tr, backupname):
1564 def savebackup(self, tr, backupname):
1558 '''Save current dirstate into backup file'''
1565 '''Save current dirstate into backup file'''
1559 filename = self._actualfilename(tr)
1566 filename = self._actualfilename(tr)
1560 assert backupname != filename
1567 assert backupname != filename
1561
1568
1562 # use '_writedirstate' instead of 'write' to write changes certainly,
1569 # use '_writedirstate' instead of 'write' to write changes certainly,
1563 # because the latter omits writing out if transaction is running.
1570 # because the latter omits writing out if transaction is running.
1564 # output file will be used to create backup of dirstate at this point.
1571 # output file will be used to create backup of dirstate at this point.
1565 if self._dirty or not self._opener.exists(filename):
1572 if self._dirty or not self._opener.exists(filename):
1566 self._writedirstate(
1573 self._writedirstate(
1567 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1574 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1568 )
1575 )
1569
1576
1570 if tr:
1577 if tr:
1571 # ensure that subsequent tr.writepending returns True for
1578 # ensure that subsequent tr.writepending returns True for
1572 # changes written out above, even if dirstate is never
1579 # changes written out above, even if dirstate is never
1573 # changed after this
1580 # changed after this
1574 tr.addfilegenerator(
1581 tr.addfilegenerator(
1575 b'dirstate',
1582 b'dirstate',
1576 (self._filename,),
1583 (self._filename,),
1577 self._writedirstate,
1584 self._writedirstate,
1578 location=b'plain',
1585 location=b'plain',
1579 )
1586 )
1580
1587
1581 # ensure that pending file written above is unlinked at
1588 # ensure that pending file written above is unlinked at
1582 # failure, even if tr.writepending isn't invoked until the
1589 # failure, even if tr.writepending isn't invoked until the
1583 # end of this transaction
1590 # end of this transaction
1584 tr.registertmp(filename, location=b'plain')
1591 tr.registertmp(filename, location=b'plain')
1585
1592
1586 self._opener.tryunlink(backupname)
1593 self._opener.tryunlink(backupname)
1587 # hardlink backup is okay because _writedirstate is always called
1594 # hardlink backup is okay because _writedirstate is always called
1588 # with an "atomictemp=True" file.
1595 # with an "atomictemp=True" file.
1589 util.copyfile(
1596 util.copyfile(
1590 self._opener.join(filename),
1597 self._opener.join(filename),
1591 self._opener.join(backupname),
1598 self._opener.join(backupname),
1592 hardlink=True,
1599 hardlink=True,
1593 )
1600 )
1594
1601
1595 def restorebackup(self, tr, backupname):
1602 def restorebackup(self, tr, backupname):
1596 '''Restore dirstate by backup file'''
1603 '''Restore dirstate by backup file'''
1597 # this "invalidate()" prevents "wlock.release()" from writing
1604 # this "invalidate()" prevents "wlock.release()" from writing
1598 # changes of dirstate out after restoring from backup file
1605 # changes of dirstate out after restoring from backup file
1599 self.invalidate()
1606 self.invalidate()
1600 filename = self._actualfilename(tr)
1607 filename = self._actualfilename(tr)
1601 o = self._opener
1608 o = self._opener
1602 if util.samefile(o.join(backupname), o.join(filename)):
1609 if util.samefile(o.join(backupname), o.join(filename)):
1603 o.unlink(backupname)
1610 o.unlink(backupname)
1604 else:
1611 else:
1605 o.rename(backupname, filename, checkambig=True)
1612 o.rename(backupname, filename, checkambig=True)
1606
1613
1607 def clearbackup(self, tr, backupname):
1614 def clearbackup(self, tr, backupname):
1608 '''Clear backup file'''
1615 '''Clear backup file'''
1609 self._opener.unlink(backupname)
1616 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now