##// END OF EJS Templates
dirstate: drop duplicated check...
marmoute -
r48470:ec1d63e6 default
parent child Browse files
Show More
@@ -1,1630 +1,1627
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """filecache subclass that resolves file names inside .hg/."""

    def join(self, obj, fname):
        # Resolve fname through the repository's .hg opener.
        opener = obj._opener
        return opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """filecache subclass that resolves file names against the repo root."""

    def join(self, obj, fname):
        # Resolve fname relative to the working directory root.
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator: only allow `func` while a parent change is pending.

    Raises error.ProgrammingError when the wrapped method is invoked
    outside of a `dirstate.parentchange()` context.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            raise error.ProgrammingError(
                'calling `%s` outside of a parentchange context'
                % func.__name__
            )
        return func(self, *args, **kwargs)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator: forbid `func` while a parent change is pending.

    Raises error.ProgrammingError when the wrapped method is invoked
    inside of a `dirstate.parentchange()` context.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            raise error.ProgrammingError(
                'calling `%s` inside of a parentchange context'
                % func.__name__
            )
        return func(self, *args, **kwargs)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._updatedfiles = set()
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
242 if self._checklink and self._checkexec:
243
243
244 def f(x):
244 def f(x):
245 try:
245 try:
246 st = os.lstat(self._join(x))
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
247 if util.statislink(st):
248 return b'l'
248 return b'l'
249 if util.statisexec(st):
249 if util.statisexec(st):
250 return b'x'
250 return b'x'
251 except OSError:
251 except OSError:
252 pass
252 pass
253 return b''
253 return b''
254
254
255 return f
255 return f
256
256
257 fallback = buildfallback()
257 fallback = buildfallback()
258 if self._checklink:
258 if self._checklink:
259
259
260 def f(x):
260 def f(x):
261 if os.path.islink(self._join(x)):
261 if os.path.islink(self._join(x)):
262 return b'l'
262 return b'l'
263 if b'x' in fallback(x):
263 if b'x' in fallback(x):
264 return b'x'
264 return b'x'
265 return b''
265 return b''
266
266
267 return f
267 return f
268 if self._checkexec:
268 if self._checkexec:
269
269
270 def f(x):
270 def f(x):
271 if b'l' in fallback(x):
271 if b'l' in fallback(x):
272 return b'l'
272 return b'l'
273 if util.isexec(self._join(x)):
273 if util.isexec(self._join(x)):
274 return b'x'
274 return b'x'
275 return b''
275 return b''
276
276
277 return f
277 return f
278 else:
278 else:
279 return fallback
279 return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries a
370 When moving from two parents to one, "merged" entries a
371 adjusted to normal and previous copy records discarded and
371 adjusted to normal and previous copy records discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 self._map.setparents(p1, p2)
388 self._map.setparents(p1, p2)
389 copies = {}
389 copies = {}
390 if (
390 if (
391 oldp2 != self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
395
396 for f in candidatefiles:
396 for f in candidatefiles:
397 s = self._map.get(f)
397 s = self._map.get(f)
398 if s is None:
398 if s is None:
399 continue
399 continue
400
400
401 # Discard "merged" markers when moving away from a merge state
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
402 if s.merged:
403 source = self._map.copymap.get(f)
403 source = self._map.copymap.get(f)
404 if source:
404 if source:
405 copies[f] = source
405 copies[f] = source
406 self.normallookup(f)
406 self.normallookup(f)
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._add(f)
413 return copies
413 return copies
414
414
415 def setbranch(self, branch):
415 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 try:
418 try:
419 f.write(self._branch + b'\n')
419 f.write(self._branch + b'\n')
420 f.close()
420 f.close()
421
421
422 # make sure filecache has the correct stat info for _branch after
422 # make sure filecache has the correct stat info for _branch after
423 # replacing the underlying file
423 # replacing the underlying file
424 ce = self._filecache[b'_branch']
424 ce = self._filecache[b'_branch']
425 if ce:
425 if ce:
426 ce.refresh()
426 ce.refresh()
427 except: # re-raises
427 except: # re-raises
428 f.discard()
428 f.discard()
429 raise
429 raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of "update/merge" case. For
469 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True the file was previously untracked, False otherwise.
472 return True the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self.normallookup(filename)
479 self.normallookup(filename)
480 return True
480 return True
481 return False
481 return False
482
482
483 @requires_no_parents_change
483 @requires_no_parents_change
484 def set_untracked(self, filename):
484 def set_untracked(self, filename):
485 """a "public" method for generic code to mark a file as untracked
485 """a "public" method for generic code to mark a file as untracked
486
486
487 This function is to be called outside of "update/merge" case. For
487 This function is to be called outside of "update/merge" case. For
488 example by a command like `hg remove X`.
488 example by a command like `hg remove X`.
489
489
490 return True the file was previously tracked, False otherwise.
490 return True the file was previously tracked, False otherwise.
491 """
491 """
492 entry = self._map.get(filename)
492 entry = self._map.get(filename)
493 if entry is None:
493 if entry is None:
494 return False
494 return False
495 elif entry.added:
495 elif entry.added:
496 self._drop(filename)
496 self._drop(filename)
497 return True
497 return True
498 else:
498 else:
499 self._remove(filename)
499 self._remove(filename)
500 return True
500 return True
501
501
502 @requires_parents_change
502 @requires_parents_change
503 def update_file_reference(
503 def update_file_reference(
504 self,
504 self,
505 filename,
505 filename,
506 p1_tracked,
506 p1_tracked,
507 ):
507 ):
508 """Set a file as tracked in the parent (or not)
508 """Set a file as tracked in the parent (or not)
509
509
510 This is to be called when adjust the dirstate to a new parent after an history
510 This is to be called when adjust the dirstate to a new parent after an history
511 rewriting operation.
511 rewriting operation.
512
512
513 It should not be called during a merge (p2 != nullid) and only within
513 It should not be called during a merge (p2 != nullid) and only within
514 a `with dirstate.parentchange():` context.
514 a `with dirstate.parentchange():` context.
515 """
515 """
516 if self.in_merge:
516 if self.in_merge:
517 msg = b'update_file_reference should not be called when merging'
517 msg = b'update_file_reference should not be called when merging'
518 raise error.ProgrammingError(msg)
518 raise error.ProgrammingError(msg)
519 entry = self._map.get(filename)
519 entry = self._map.get(filename)
520 if entry is None:
520 if entry is None:
521 wc_tracked = False
521 wc_tracked = False
522 else:
522 else:
523 wc_tracked = entry.tracked
523 wc_tracked = entry.tracked
524 if p1_tracked and wc_tracked:
524 if p1_tracked and wc_tracked:
525 # the underlying reference might have changed, we will have to
525 # the underlying reference might have changed, we will have to
526 # check it.
526 # check it.
527 self.normallookup(filename)
527 self.normallookup(filename)
528 elif not (p1_tracked or wc_tracked):
528 elif not (p1_tracked or wc_tracked):
529 # the file is no longer relevant to anyone
529 # the file is no longer relevant to anyone
530 self._drop(filename)
530 self._drop(filename)
531 elif (not p1_tracked) and wc_tracked:
531 elif (not p1_tracked) and wc_tracked:
532 if not entry.added:
532 if not entry.added:
533 self._add(filename)
533 self._add(filename)
534 elif p1_tracked and not wc_tracked:
534 elif p1_tracked and not wc_tracked:
535 if entry is None or not entry.removed:
535 if entry is None or not entry.removed:
536 self._remove(filename)
536 self._remove(filename)
537 else:
537 else:
538 assert False, 'unreachable'
538 assert False, 'unreachable'
539
539
540 @requires_parents_change
540 @requires_parents_change
541 def update_file(
541 def update_file(
542 self,
542 self,
543 filename,
543 filename,
544 wc_tracked,
544 wc_tracked,
545 p1_tracked,
545 p1_tracked,
546 p2_tracked=False,
546 p2_tracked=False,
547 merged=False,
547 merged=False,
548 clean_p1=False,
548 clean_p1=False,
549 clean_p2=False,
549 clean_p2=False,
550 possibly_dirty=False,
550 possibly_dirty=False,
551 ):
551 ):
552 """update the information about a file in the dirstate
552 """update the information about a file in the dirstate
553
553
554 This is to be called when the direstates parent changes to keep track
554 This is to be called when the direstates parent changes to keep track
555 of what is the file situation in regards to the working copy and its parent.
555 of what is the file situation in regards to the working copy and its parent.
556
556
557 This function must be called within a `dirstate.parentchange` context.
557 This function must be called within a `dirstate.parentchange` context.
558
558
559 note: the API is at an early stage and we might need to ajust it
559 note: the API is at an early stage and we might need to ajust it
560 depending of what information ends up being relevant and useful to
560 depending of what information ends up being relevant and useful to
561 other processing.
561 other processing.
562 """
562 """
563 if not self.pendingparentchange():
564 msg = b'calling `update_file` outside of a parentchange context'
565 raise error.ProgrammingError(msg)
566 if merged and (clean_p1 or clean_p2):
563 if merged and (clean_p1 or clean_p2):
567 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
564 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
568 raise error.ProgrammingError(msg)
565 raise error.ProgrammingError(msg)
569 assert not (merged and (clean_p1 or clean_p1))
566 assert not (merged and (clean_p1 or clean_p1))
570 if not (p1_tracked or p2_tracked or wc_tracked):
567 if not (p1_tracked or p2_tracked or wc_tracked):
571 self._drop(filename)
568 self._drop(filename)
572 elif merged:
569 elif merged:
573 assert wc_tracked
570 assert wc_tracked
574 if not self.in_merge:
571 if not self.in_merge:
575 self.normallookup(filename)
572 self.normallookup(filename)
576 self.otherparent(filename)
573 self.otherparent(filename)
577 elif not (p1_tracked or p2_tracked) and wc_tracked:
574 elif not (p1_tracked or p2_tracked) and wc_tracked:
578 self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
575 self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
579 self._map.copymap.pop(filename, None)
576 self._map.copymap.pop(filename, None)
580 elif (p1_tracked or p2_tracked) and not wc_tracked:
577 elif (p1_tracked or p2_tracked) and not wc_tracked:
581 self._remove(filename)
578 self._remove(filename)
582 elif clean_p2 and wc_tracked:
579 elif clean_p2 and wc_tracked:
583 assert p2_tracked
580 assert p2_tracked
584 self.otherparent(filename)
581 self.otherparent(filename)
585 elif not p1_tracked and p2_tracked and wc_tracked:
582 elif not p1_tracked and p2_tracked and wc_tracked:
586 self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
583 self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
587 self._map.copymap.pop(filename, None)
584 self._map.copymap.pop(filename, None)
588 elif possibly_dirty:
585 elif possibly_dirty:
589 self._addpath(filename, possibly_dirty=possibly_dirty)
586 self._addpath(filename, possibly_dirty=possibly_dirty)
590 elif wc_tracked:
587 elif wc_tracked:
591 self.normal(filename)
588 self.normal(filename)
592 # XXX We need something for file that are dirty after an update
589 # XXX We need something for file that are dirty after an update
593 else:
590 else:
594 assert False, 'unreachable'
591 assert False, 'unreachable'
595
592
596 @requires_parents_change
593 @requires_parents_change
597 def update_parent_file_data(self, f, filedata):
594 def update_parent_file_data(self, f, filedata):
598 """update the information about the content of a file
595 """update the information about the content of a file
599
596
600 This function should be called within a `dirstate.parentchange` context.
597 This function should be called within a `dirstate.parentchange` context.
601 """
598 """
602 self.normal(f, parentfiledata=filedata)
599 self.normal(f, parentfiledata=filedata)
603
600
604 def _addpath(
601 def _addpath(
605 self,
602 self,
606 f,
603 f,
607 mode=0,
604 mode=0,
608 size=None,
605 size=None,
609 mtime=None,
606 mtime=None,
610 added=False,
607 added=False,
611 merged=False,
608 merged=False,
612 from_p2=False,
609 from_p2=False,
613 possibly_dirty=False,
610 possibly_dirty=False,
614 ):
611 ):
615 entry = self._map.get(f)
612 entry = self._map.get(f)
616 if added or entry is not None and entry.removed:
613 if added or entry is not None and entry.removed:
617 scmutil.checkfilename(f)
614 scmutil.checkfilename(f)
618 if self._map.hastrackeddir(f):
615 if self._map.hastrackeddir(f):
619 msg = _(b'directory %r already in dirstate')
616 msg = _(b'directory %r already in dirstate')
620 msg %= pycompat.bytestr(f)
617 msg %= pycompat.bytestr(f)
621 raise error.Abort(msg)
618 raise error.Abort(msg)
622 # shadows
619 # shadows
623 for d in pathutil.finddirs(f):
620 for d in pathutil.finddirs(f):
624 if self._map.hastrackeddir(d):
621 if self._map.hastrackeddir(d):
625 break
622 break
626 entry = self._map.get(d)
623 entry = self._map.get(d)
627 if entry is not None and not entry.removed:
624 if entry is not None and not entry.removed:
628 msg = _(b'file %r in dirstate clashes with %r')
625 msg = _(b'file %r in dirstate clashes with %r')
629 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
626 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
630 raise error.Abort(msg)
627 raise error.Abort(msg)
631 self._dirty = True
628 self._dirty = True
632 self._updatedfiles.add(f)
629 self._updatedfiles.add(f)
633 self._map.addfile(
630 self._map.addfile(
634 f,
631 f,
635 mode=mode,
632 mode=mode,
636 size=size,
633 size=size,
637 mtime=mtime,
634 mtime=mtime,
638 added=added,
635 added=added,
639 merged=merged,
636 merged=merged,
640 from_p2=from_p2,
637 from_p2=from_p2,
641 possibly_dirty=possibly_dirty,
638 possibly_dirty=possibly_dirty,
642 )
639 )
643
640
644 def normal(self, f, parentfiledata=None):
641 def normal(self, f, parentfiledata=None):
645 """Mark a file normal and clean.
642 """Mark a file normal and clean.
646
643
647 parentfiledata: (mode, size, mtime) of the clean file
644 parentfiledata: (mode, size, mtime) of the clean file
648
645
649 parentfiledata should be computed from memory (for mode,
646 parentfiledata should be computed from memory (for mode,
650 size), as or close as possible from the point where we
647 size), as or close as possible from the point where we
651 determined the file was clean, to limit the risk of the
648 determined the file was clean, to limit the risk of the
652 file having been changed by an external process between the
649 file having been changed by an external process between the
653 moment where the file was determined to be clean and now."""
650 moment where the file was determined to be clean and now."""
654 if parentfiledata:
651 if parentfiledata:
655 (mode, size, mtime) = parentfiledata
652 (mode, size, mtime) = parentfiledata
656 else:
653 else:
657 s = os.lstat(self._join(f))
654 s = os.lstat(self._join(f))
658 mode = s.st_mode
655 mode = s.st_mode
659 size = s.st_size
656 size = s.st_size
660 mtime = s[stat.ST_MTIME]
657 mtime = s[stat.ST_MTIME]
661 self._addpath(f, mode=mode, size=size, mtime=mtime)
658 self._addpath(f, mode=mode, size=size, mtime=mtime)
662 self._map.copymap.pop(f, None)
659 self._map.copymap.pop(f, None)
663 if f in self._map.nonnormalset:
660 if f in self._map.nonnormalset:
664 self._map.nonnormalset.remove(f)
661 self._map.nonnormalset.remove(f)
665 if mtime > self._lastnormaltime:
662 if mtime > self._lastnormaltime:
666 # Remember the most recent modification timeslot for status(),
663 # Remember the most recent modification timeslot for status(),
667 # to make sure we won't miss future size-preserving file content
664 # to make sure we won't miss future size-preserving file content
668 # modifications that happen within the same timeslot.
665 # modifications that happen within the same timeslot.
669 self._lastnormaltime = mtime
666 self._lastnormaltime = mtime
670
667
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with a a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    # snapshot the copy source before merge/otherparent
                    # clear it from the copy map
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    if source is not None:
                        # reinstate the copy record cleared above
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in the desired merge state; nothing to do
                    return
        # ordinary case: mark tracked but in need of a content lookup
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
694
691
695 def otherparent(self, f):
692 def otherparent(self, f):
696 '''Mark as coming from the other parent, always dirty.'''
693 '''Mark as coming from the other parent, always dirty.'''
697 if not self.in_merge:
694 if not self.in_merge:
698 msg = _(b"setting %r to other parent only allowed in merges") % f
695 msg = _(b"setting %r to other parent only allowed in merges") % f
699 raise error.Abort(msg)
696 raise error.Abort(msg)
700 entry = self._map.get(f)
697 entry = self._map.get(f)
701 if entry is not None and entry.tracked:
698 if entry is not None and entry.tracked:
702 # merge-like
699 # merge-like
703 self._addpath(f, merged=True)
700 self._addpath(f, merged=True)
704 else:
701 else:
705 # add-like
702 # add-like
706 self._addpath(f, from_p2=True)
703 self._addpath(f, from_p2=True)
707 self._map.copymap.pop(f, None)
704 self._map.copymap.pop(f, None)
708
705
709 def add(self, f):
706 def add(self, f):
710 '''Mark a file added.'''
707 '''Mark a file added.'''
711 if not self.pendingparentchange():
708 if not self.pendingparentchange():
712 util.nouideprecwarn(
709 util.nouideprecwarn(
713 b"do not use `add` outside of update/merge context."
710 b"do not use `add` outside of update/merge context."
714 b" Use `set_tracked`",
711 b" Use `set_tracked`",
715 b'6.0',
712 b'6.0',
716 stacklevel=2,
713 stacklevel=2,
717 )
714 )
718 self._add(f)
715 self._add(f)
719
716
720 def _add(self, filename):
717 def _add(self, filename):
721 """internal function to mark a file as added"""
718 """internal function to mark a file as added"""
722 self._addpath(filename, added=True)
719 self._addpath(filename, added=True)
723 self._map.copymap.pop(filename, None)
720 self._map.copymap.pop(filename, None)
724
721
725 def remove(self, f):
722 def remove(self, f):
726 '''Mark a file removed'''
723 '''Mark a file removed'''
727 if not self.pendingparentchange():
724 if not self.pendingparentchange():
728 util.nouideprecwarn(
725 util.nouideprecwarn(
729 b"do not use `remove` outside of update/merge context."
726 b"do not use `remove` outside of update/merge context."
730 b" Use `set_untracked`",
727 b" Use `set_untracked`",
731 b'6.0',
728 b'6.0',
732 stacklevel=2,
729 stacklevel=2,
733 )
730 )
734 self._remove(f)
731 self._remove(f)
735
732
736 def _remove(self, filename):
733 def _remove(self, filename):
737 """internal function to mark a file removed"""
734 """internal function to mark a file removed"""
738 self._dirty = True
735 self._dirty = True
739 self._updatedfiles.add(filename)
736 self._updatedfiles.add(filename)
740 self._map.removefile(filename, in_merge=self.in_merge)
737 self._map.removefile(filename, in_merge=self.in_merge)
741
738
742 def merge(self, f):
739 def merge(self, f):
743 '''Mark a file merged.'''
740 '''Mark a file merged.'''
744 if not self.in_merge:
741 if not self.in_merge:
745 return self.normallookup(f)
742 return self.normallookup(f)
746 return self.otherparent(f)
743 return self.otherparent(f)
747
744
748 def drop(self, f):
745 def drop(self, f):
749 '''Drop a file from the dirstate'''
746 '''Drop a file from the dirstate'''
750 if not self.pendingparentchange():
747 if not self.pendingparentchange():
751 util.nouideprecwarn(
748 util.nouideprecwarn(
752 b"do not use `drop` outside of update/merge context."
749 b"do not use `drop` outside of update/merge context."
753 b" Use `set_untracked`",
750 b" Use `set_untracked`",
754 b'6.0',
751 b'6.0',
755 stacklevel=2,
752 stacklevel=2,
756 )
753 )
757 self._drop(f)
754 self._drop(f)
758
755
759 def _drop(self, filename):
756 def _drop(self, filename):
760 """internal function to drop a file from the dirstate"""
757 """internal function to drop a file from the dirstate"""
761 if self._map.dropfile(filename):
758 if self._map.dropfile(filename):
762 self._dirty = True
759 self._dirty = True
763 self._updatedfiles.add(filename)
760 self._updatedfiles.add(filename)
764 self._map.copymap.pop(filename, None)
761 self._map.copymap.pop(filename, None)
765
762
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Compute the case-folded form of `path` by probing the filesystem.

        `normed` is the case-normalized form of `path`.  `exists` may be
        passed by the caller to skip the lexists() probe.  Results for
        existing paths are cached in `storemap` (a fold map).  When
        `ignoremissing` is True, missing paths come back unchanged.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist on disk
            storemap[normed] = folded

        return folded
791
788
792 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
789 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
793 normed = util.normcase(path)
790 normed = util.normcase(path)
794 folded = self._map.filefoldmap.get(normed, None)
791 folded = self._map.filefoldmap.get(normed, None)
795 if folded is None:
792 if folded is None:
796 if isknown:
793 if isknown:
797 folded = path
794 folded = path
798 else:
795 else:
799 folded = self._discoverpath(
796 folded = self._discoverpath(
800 path, normed, ignoremissing, exists, self._map.filefoldmap
797 path, normed, ignoremissing, exists, self._map.filefoldmap
801 )
798 )
802 return folded
799 return folded
803
800
804 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
801 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
805 normed = util.normcase(path)
802 normed = util.normcase(path)
806 folded = self._map.filefoldmap.get(normed, None)
803 folded = self._map.filefoldmap.get(normed, None)
807 if folded is None:
804 if folded is None:
808 folded = self._map.dirfoldmap.get(normed, None)
805 folded = self._map.dirfoldmap.get(normed, None)
809 if folded is None:
806 if folded is None:
810 if isknown:
807 if isknown:
811 folded = path
808 folded = path
812 else:
809 else:
813 # store discovered result in dirfoldmap so that future
810 # store discovered result in dirfoldmap so that future
814 # normalizefile calls don't start matching directories
811 # normalizefile calls don't start matching directories
815 folded = self._discoverpath(
812 folded = self._discoverpath(
816 path, normed, ignoremissing, exists, self._map.dirfoldmap
813 path, normed, ignoremissing, exists, self._map.dirfoldmap
817 )
814 )
818 return folded
815 return folded
819
816
820 def normalize(self, path, isknown=False, ignoremissing=False):
817 def normalize(self, path, isknown=False, ignoremissing=False):
821 """
818 """
822 normalize the case of a pathname when on a casefolding filesystem
819 normalize the case of a pathname when on a casefolding filesystem
823
820
824 isknown specifies whether the filename came from walking the
821 isknown specifies whether the filename came from walking the
825 disk, to avoid extra filesystem access.
822 disk, to avoid extra filesystem access.
826
823
827 If ignoremissing is True, missing path are returned
824 If ignoremissing is True, missing path are returned
828 unchanged. Otherwise, we try harder to normalize possibly
825 unchanged. Otherwise, we try harder to normalize possibly
829 existing path components.
826 existing path components.
830
827
831 The normalized case is determined based on the following precedence:
828 The normalized case is determined based on the following precedence:
832
829
833 - version of name already stored in the dirstate
830 - version of name already stored in the dirstate
834 - version of name stored on disk
831 - version of name stored on disk
835 - version provided via command arguments
832 - version provided via command arguments
836 """
833 """
837
834
838 if self._checkcase:
835 if self._checkcase:
839 return self._normalize(path, isknown, ignoremissing)
836 return self._normalize(path, isknown, ignoremissing)
840 return path
837 return path
841
838
842 def clear(self):
839 def clear(self):
843 self._map.clear()
840 self._map.clear()
844 self._lastnormaltime = 0
841 self._lastnormaltime = 0
845 self._updatedfiles.clear()
842 self._updatedfiles.clear()
846 self._dirty = True
843 self._dirty = True
847
844
848 def rebuild(self, parent, allfiles, changedfiles=None):
845 def rebuild(self, parent, allfiles, changedfiles=None):
849 if changedfiles is None:
846 if changedfiles is None:
850 # Rebuild entire dirstate
847 # Rebuild entire dirstate
851 to_lookup = allfiles
848 to_lookup = allfiles
852 to_drop = []
849 to_drop = []
853 lastnormaltime = self._lastnormaltime
850 lastnormaltime = self._lastnormaltime
854 self.clear()
851 self.clear()
855 self._lastnormaltime = lastnormaltime
852 self._lastnormaltime = lastnormaltime
856 elif len(changedfiles) < 10:
853 elif len(changedfiles) < 10:
857 # Avoid turning allfiles into a set, which can be expensive if it's
854 # Avoid turning allfiles into a set, which can be expensive if it's
858 # large.
855 # large.
859 to_lookup = []
856 to_lookup = []
860 to_drop = []
857 to_drop = []
861 for f in changedfiles:
858 for f in changedfiles:
862 if f in allfiles:
859 if f in allfiles:
863 to_lookup.append(f)
860 to_lookup.append(f)
864 else:
861 else:
865 to_drop.append(f)
862 to_drop.append(f)
866 else:
863 else:
867 changedfilesset = set(changedfiles)
864 changedfilesset = set(changedfiles)
868 to_lookup = changedfilesset & set(allfiles)
865 to_lookup = changedfilesset & set(allfiles)
869 to_drop = changedfilesset - to_lookup
866 to_drop = changedfilesset - to_lookup
870
867
871 if self._origpl is None:
868 if self._origpl is None:
872 self._origpl = self._pl
869 self._origpl = self._pl
873 self._map.setparents(parent, self._nodeconstants.nullid)
870 self._map.setparents(parent, self._nodeconstants.nullid)
874
871
875 for f in to_lookup:
872 for f in to_lookup:
876 self.normallookup(f)
873 self.normallookup(f)
877 for f in to_drop:
874 for f in to_drop:
878 self._drop(f)
875 self._drop(f)
879
876
880 self._dirty = True
877 self._dirty = True
881
878
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        # delegated to the map, which tracks the on-disk file's identity
        return self._map.identity
889
886
    def write(self, tr):
        """Persist the dirstate, or schedule it on transaction `tr`.

        No-op when nothing is dirty.  With a transaction, the actual write
        is delayed via a file generator; without one, it happens now.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        # no transaction: write synchronously, atomically (atomictemp) and
        # guarding against timestamp races (checkambig)
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
921
918
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # keyed storage: registering the same category replaces the callback
        self._plchangecallbacks[category] = callback
932
929
    def _writedirstate(self, st):
        """Serialize the dirstate map into the open file object `st`."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
966
963
967 def _dirignore(self, f):
964 def _dirignore(self, f):
968 if self._ignore(f):
965 if self._ignore(f):
969 return True
966 return True
970 for p in pathutil.finddirs(f):
967 for p in pathutil.finddirs(f):
971 if self._ignore(p):
968 if self._ignore(p):
972 return True
969 return True
973 return False
970 return False
974
971
975 def _ignorefiles(self):
972 def _ignorefiles(self):
976 files = []
973 files = []
977 if os.path.exists(self._join(b'.hgignore')):
974 if os.path.exists(self._join(b'.hgignore')):
978 files.append(self._join(b'.hgignore'))
975 files.append(self._join(b'.hgignore'))
979 for name, path in self._ui.configitems(b"ui"):
976 for name, path in self._ui.configitems(b"ui"):
980 if name == b'ignore' or name.startswith(b'ignore.'):
977 if name == b'ignore' or name.startswith(b'ignore.'):
981 # we need to use os.path.join here rather than self._join
978 # we need to use os.path.join here rather than self._join
982 # because path is arbitrary and user-specified
979 # because path is arbitrary and user-specified
983 files.append(os.path.join(self._rootdir, util.expandpath(path)))
980 files.append(os.path.join(self._rootdir, util.expandpath(path)))
984 return files
981 return files
985
982
    def _ignorefileandline(self, f):
        """Return (file, lineno, line) of the first ignore rule matching `f`.

        Walks the ignore files breadth-first, following `subinclude`
        patterns; returns (None, -1, b"") when no rule matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced file unless already processed
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
1007
1004
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for an unsupported file type
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # hoist frequently-used lookups to locals for the loop below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop any matched files that live inside a subrepo; both lists are
        # sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group found files by their case-normalized form
            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            # for ambiguous groups, keep only the on-disk spelling
            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1142
1139
1143 def walk(self, match, subrepos, unknown, ignored, full=True):
1140 def walk(self, match, subrepos, unknown, ignored, full=True):
1144 """
1141 """
1145 Walk recursively through the directory tree, finding all files
1142 Walk recursively through the directory tree, finding all files
1146 matched by match.
1143 matched by match.
1147
1144
1148 If full is False, maybe skip some known-clean files.
1145 If full is False, maybe skip some known-clean files.
1149
1146
1150 Return a dict mapping filename to stat-like object (either
1147 Return a dict mapping filename to stat-like object (either
1151 mercurial.osutil.stat instance or return value of os.stat()).
1148 mercurial.osutil.stat instance or return value of os.stat()).
1152
1149
1153 """
1150 """
1154 # full is a flag that extensions that hook into walk can use -- this
1151 # full is a flag that extensions that hook into walk can use -- this
1155 # implementation doesn't use it at all. This satisfies the contract
1152 # implementation doesn't use it at all. This satisfies the contract
1156 # because we only guarantee a "maybe".
1153 # because we only guarantee a "maybe".
1157
1154
1158 if ignored:
1155 if ignored:
1159 ignore = util.never
1156 ignore = util.never
1160 dirignore = util.never
1157 dirignore = util.never
1161 elif unknown:
1158 elif unknown:
1162 ignore = self._ignore
1159 ignore = self._ignore
1163 dirignore = self._dirignore
1160 dirignore = self._dirignore
1164 else:
1161 else:
1165 # if not unknown and not ignored, drop dir recursion and step 2
1162 # if not unknown and not ignored, drop dir recursion and step 2
1166 ignore = util.always
1163 ignore = util.always
1167 dirignore = util.always
1164 dirignore = util.always
1168
1165
1169 matchfn = match.matchfn
1166 matchfn = match.matchfn
1170 matchalways = match.always()
1167 matchalways = match.always()
1171 matchtdir = match.traversedir
1168 matchtdir = match.traversedir
1172 dmap = self._map
1169 dmap = self._map
1173 listdir = util.listdir
1170 listdir = util.listdir
1174 lstat = os.lstat
1171 lstat = os.lstat
1175 dirkind = stat.S_IFDIR
1172 dirkind = stat.S_IFDIR
1176 regkind = stat.S_IFREG
1173 regkind = stat.S_IFREG
1177 lnkkind = stat.S_IFLNK
1174 lnkkind = stat.S_IFLNK
1178 join = self._join
1175 join = self._join
1179
1176
1180 exact = skipstep3 = False
1177 exact = skipstep3 = False
1181 if match.isexact(): # match.exact
1178 if match.isexact(): # match.exact
1182 exact = True
1179 exact = True
1183 dirignore = util.always # skip step 2
1180 dirignore = util.always # skip step 2
1184 elif match.prefix(): # match.match, no patterns
1181 elif match.prefix(): # match.match, no patterns
1185 skipstep3 = True
1182 skipstep3 = True
1186
1183
1187 if not exact and self._checkcase:
1184 if not exact and self._checkcase:
1188 normalize = self._normalize
1185 normalize = self._normalize
1189 normalizefile = self._normalizefile
1186 normalizefile = self._normalizefile
1190 skipstep3 = False
1187 skipstep3 = False
1191 else:
1188 else:
1192 normalize = self._normalize
1189 normalize = self._normalize
1193 normalizefile = None
1190 normalizefile = None
1194
1191
1195 # step 1: find all explicit files
1192 # step 1: find all explicit files
1196 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1193 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1197 if matchtdir:
1194 if matchtdir:
1198 for d in work:
1195 for d in work:
1199 matchtdir(d[0])
1196 matchtdir(d[0])
1200 for d in dirsnotfound:
1197 for d in dirsnotfound:
1201 matchtdir(d)
1198 matchtdir(d)
1202
1199
1203 skipstep3 = skipstep3 and not (work or dirsnotfound)
1200 skipstep3 = skipstep3 and not (work or dirsnotfound)
1204 work = [d for d in work if not dirignore(d[0])]
1201 work = [d for d in work if not dirignore(d[0])]
1205
1202
1206 # step 2: visit subdirectories
1203 # step 2: visit subdirectories
1207 def traverse(work, alreadynormed):
1204 def traverse(work, alreadynormed):
1208 wadd = work.append
1205 wadd = work.append
1209 while work:
1206 while work:
1210 tracing.counter('dirstate.walk work', len(work))
1207 tracing.counter('dirstate.walk work', len(work))
1211 nd = work.pop()
1208 nd = work.pop()
1212 visitentries = match.visitchildrenset(nd)
1209 visitentries = match.visitchildrenset(nd)
1213 if not visitentries:
1210 if not visitentries:
1214 continue
1211 continue
1215 if visitentries == b'this' or visitentries == b'all':
1212 if visitentries == b'this' or visitentries == b'all':
1216 visitentries = None
1213 visitentries = None
1217 skip = None
1214 skip = None
1218 if nd != b'':
1215 if nd != b'':
1219 skip = b'.hg'
1216 skip = b'.hg'
1220 try:
1217 try:
1221 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1218 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1222 entries = listdir(join(nd), stat=True, skip=skip)
1219 entries = listdir(join(nd), stat=True, skip=skip)
1223 except OSError as inst:
1220 except OSError as inst:
1224 if inst.errno in (errno.EACCES, errno.ENOENT):
1221 if inst.errno in (errno.EACCES, errno.ENOENT):
1225 match.bad(
1222 match.bad(
1226 self.pathto(nd), encoding.strtolocal(inst.strerror)
1223 self.pathto(nd), encoding.strtolocal(inst.strerror)
1227 )
1224 )
1228 continue
1225 continue
1229 raise
1226 raise
1230 for f, kind, st in entries:
1227 for f, kind, st in entries:
1231 # Some matchers may return files in the visitentries set,
1228 # Some matchers may return files in the visitentries set,
1232 # instead of 'this', if the matcher explicitly mentions them
1229 # instead of 'this', if the matcher explicitly mentions them
1233 # and is not an exactmatcher. This is acceptable; we do not
1230 # and is not an exactmatcher. This is acceptable; we do not
1234 # make any hard assumptions about file-or-directory below
1231 # make any hard assumptions about file-or-directory below
1235 # based on the presence of `f` in visitentries. If
1232 # based on the presence of `f` in visitentries. If
1236 # visitchildrenset returned a set, we can always skip the
1233 # visitchildrenset returned a set, we can always skip the
1237 # entries *not* in the set it provided regardless of whether
1234 # entries *not* in the set it provided regardless of whether
1238 # they're actually a file or a directory.
1235 # they're actually a file or a directory.
1239 if visitentries and f not in visitentries:
1236 if visitentries and f not in visitentries:
1240 continue
1237 continue
1241 if normalizefile:
1238 if normalizefile:
1242 # even though f might be a directory, we're only
1239 # even though f might be a directory, we're only
1243 # interested in comparing it to files currently in the
1240 # interested in comparing it to files currently in the
1244 # dmap -- therefore normalizefile is enough
1241 # dmap -- therefore normalizefile is enough
1245 nf = normalizefile(
1242 nf = normalizefile(
1246 nd and (nd + b"/" + f) or f, True, True
1243 nd and (nd + b"/" + f) or f, True, True
1247 )
1244 )
1248 else:
1245 else:
1249 nf = nd and (nd + b"/" + f) or f
1246 nf = nd and (nd + b"/" + f) or f
1250 if nf not in results:
1247 if nf not in results:
1251 if kind == dirkind:
1248 if kind == dirkind:
1252 if not ignore(nf):
1249 if not ignore(nf):
1253 if matchtdir:
1250 if matchtdir:
1254 matchtdir(nf)
1251 matchtdir(nf)
1255 wadd(nf)
1252 wadd(nf)
1256 if nf in dmap and (matchalways or matchfn(nf)):
1253 if nf in dmap and (matchalways or matchfn(nf)):
1257 results[nf] = None
1254 results[nf] = None
1258 elif kind == regkind or kind == lnkkind:
1255 elif kind == regkind or kind == lnkkind:
1259 if nf in dmap:
1256 if nf in dmap:
1260 if matchalways or matchfn(nf):
1257 if matchalways or matchfn(nf):
1261 results[nf] = st
1258 results[nf] = st
1262 elif (matchalways or matchfn(nf)) and not ignore(
1259 elif (matchalways or matchfn(nf)) and not ignore(
1263 nf
1260 nf
1264 ):
1261 ):
1265 # unknown file -- normalize if necessary
1262 # unknown file -- normalize if necessary
1266 if not alreadynormed:
1263 if not alreadynormed:
1267 nf = normalize(nf, False, True)
1264 nf = normalize(nf, False, True)
1268 results[nf] = st
1265 results[nf] = st
1269 elif nf in dmap and (matchalways or matchfn(nf)):
1266 elif nf in dmap and (matchalways or matchfn(nf)):
1270 results[nf] = None
1267 results[nf] = None
1271
1268
1272 for nd, d in work:
1269 for nd, d in work:
1273 # alreadynormed means that processwork doesn't have to do any
1270 # alreadynormed means that processwork doesn't have to do any
1274 # expensive directory normalization
1271 # expensive directory normalization
1275 alreadynormed = not normalize or nd == d
1272 alreadynormed = not normalize or nd == d
1276 traverse([d], alreadynormed)
1273 traverse([d], alreadynormed)
1277
1274
1278 for s in subrepos:
1275 for s in subrepos:
1279 del results[s]
1276 del results[s]
1280 del results[b'.hg']
1277 del results[b'.hg']
1281
1278
1282 # step 3: visit remaining files from dmap
1279 # step 3: visit remaining files from dmap
1283 if not skipstep3 and not exact:
1280 if not skipstep3 and not exact:
1284 # If a dmap file is not in results yet, it was either
1281 # If a dmap file is not in results yet, it was either
1285 # a) not matching matchfn b) ignored, c) missing, or d) under a
1282 # a) not matching matchfn b) ignored, c) missing, or d) under a
1286 # symlink directory.
1283 # symlink directory.
1287 if not results and matchalways:
1284 if not results and matchalways:
1288 visit = [f for f in dmap]
1285 visit = [f for f in dmap]
1289 else:
1286 else:
1290 visit = [f for f in dmap if f not in results and matchfn(f)]
1287 visit = [f for f in dmap if f not in results and matchfn(f)]
1291 visit.sort()
1288 visit.sort()
1292
1289
1293 if unknown:
1290 if unknown:
1294 # unknown == True means we walked all dirs under the roots
1291 # unknown == True means we walked all dirs under the roots
1295 # that wasn't ignored, and everything that matched was stat'ed
1292 # that wasn't ignored, and everything that matched was stat'ed
1296 # and is already in results.
1293 # and is already in results.
1297 # The rest must thus be ignored or under a symlink.
1294 # The rest must thus be ignored or under a symlink.
1298 audit_path = pathutil.pathauditor(self._root, cached=True)
1295 audit_path = pathutil.pathauditor(self._root, cached=True)
1299
1296
1300 for nf in iter(visit):
1297 for nf in iter(visit):
1301 # If a stat for the same file was already added with a
1298 # If a stat for the same file was already added with a
1302 # different case, don't add one for this, since that would
1299 # different case, don't add one for this, since that would
1303 # make it appear as if the file exists under both names
1300 # make it appear as if the file exists under both names
1304 # on disk.
1301 # on disk.
1305 if (
1302 if (
1306 normalizefile
1303 normalizefile
1307 and normalizefile(nf, True, True) in results
1304 and normalizefile(nf, True, True) in results
1308 ):
1305 ):
1309 results[nf] = None
1306 results[nf] = None
1310 # Report ignored items in the dmap as long as they are not
1307 # Report ignored items in the dmap as long as they are not
1311 # under a symlink directory.
1308 # under a symlink directory.
1312 elif audit_path.check(nf):
1309 elif audit_path.check(nf):
1313 try:
1310 try:
1314 results[nf] = lstat(join(nf))
1311 results[nf] = lstat(join(nf))
1315 # file was just ignored, no links, and exists
1312 # file was just ignored, no links, and exists
1316 except OSError:
1313 except OSError:
1317 # file doesn't exist
1314 # file doesn't exist
1318 results[nf] = None
1315 results[nf] = None
1319 else:
1316 else:
1320 # It's either missing or under a symlink directory
1317 # It's either missing or under a symlink directory
1321 # which we in this case report as missing
1318 # which we in this case report as missing
1322 results[nf] = None
1319 results[nf] = None
1323 else:
1320 else:
1324 # We may not have walked the full directory tree above,
1321 # We may not have walked the full directory tree above,
1325 # so stat and check everything we missed.
1322 # so stat and check everything we missed.
1326 iv = iter(visit)
1323 iv = iter(visit)
1327 for st in util.statfiles([join(i) for i in visit]):
1324 for st in util.statfiles([join(i) for i in visit]):
1328 results[next(iv)] = st
1325 results[next(iv)] = st
1329 return results
1326 return results
1330
1327
1331 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1328 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1332 # Force Rayon (Rust parallelism library) to respect the number of
1329 # Force Rayon (Rust parallelism library) to respect the number of
1333 # workers. This is a temporary workaround until Rust code knows
1330 # workers. This is a temporary workaround until Rust code knows
1334 # how to read the config file.
1331 # how to read the config file.
1335 numcpus = self._ui.configint(b"worker", b"numcpus")
1332 numcpus = self._ui.configint(b"worker", b"numcpus")
1336 if numcpus is not None:
1333 if numcpus is not None:
1337 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1334 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1338
1335
1339 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1336 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1340 if not workers_enabled:
1337 if not workers_enabled:
1341 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1338 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1342
1339
1343 (
1340 (
1344 lookup,
1341 lookup,
1345 modified,
1342 modified,
1346 added,
1343 added,
1347 removed,
1344 removed,
1348 deleted,
1345 deleted,
1349 clean,
1346 clean,
1350 ignored,
1347 ignored,
1351 unknown,
1348 unknown,
1352 warnings,
1349 warnings,
1353 bad,
1350 bad,
1354 traversed,
1351 traversed,
1355 dirty,
1352 dirty,
1356 ) = rustmod.status(
1353 ) = rustmod.status(
1357 self._map._rustmap,
1354 self._map._rustmap,
1358 matcher,
1355 matcher,
1359 self._rootdir,
1356 self._rootdir,
1360 self._ignorefiles(),
1357 self._ignorefiles(),
1361 self._checkexec,
1358 self._checkexec,
1362 self._lastnormaltime,
1359 self._lastnormaltime,
1363 bool(list_clean),
1360 bool(list_clean),
1364 bool(list_ignored),
1361 bool(list_ignored),
1365 bool(list_unknown),
1362 bool(list_unknown),
1366 bool(matcher.traversedir),
1363 bool(matcher.traversedir),
1367 )
1364 )
1368
1365
1369 self._dirty |= dirty
1366 self._dirty |= dirty
1370
1367
1371 if matcher.traversedir:
1368 if matcher.traversedir:
1372 for dir in traversed:
1369 for dir in traversed:
1373 matcher.traversedir(dir)
1370 matcher.traversedir(dir)
1374
1371
1375 if self._ui.warn:
1372 if self._ui.warn:
1376 for item in warnings:
1373 for item in warnings:
1377 if isinstance(item, tuple):
1374 if isinstance(item, tuple):
1378 file_path, syntax = item
1375 file_path, syntax = item
1379 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1376 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1380 file_path,
1377 file_path,
1381 syntax,
1378 syntax,
1382 )
1379 )
1383 self._ui.warn(msg)
1380 self._ui.warn(msg)
1384 else:
1381 else:
1385 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1382 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1386 self._ui.warn(
1383 self._ui.warn(
1387 msg
1384 msg
1388 % (
1385 % (
1389 pathutil.canonpath(
1386 pathutil.canonpath(
1390 self._rootdir, self._rootdir, item
1387 self._rootdir, self._rootdir, item
1391 ),
1388 ),
1392 b"No such file or directory",
1389 b"No such file or directory",
1393 )
1390 )
1394 )
1391 )
1395
1392
1396 for (fn, message) in bad:
1393 for (fn, message) in bad:
1397 matcher.bad(fn, encoding.strtolocal(message))
1394 matcher.bad(fn, encoding.strtolocal(message))
1398
1395
1399 status = scmutil.status(
1396 status = scmutil.status(
1400 modified=modified,
1397 modified=modified,
1401 added=added,
1398 added=added,
1402 removed=removed,
1399 removed=removed,
1403 deleted=deleted,
1400 deleted=deleted,
1404 unknown=unknown,
1401 unknown=unknown,
1405 ignored=ignored,
1402 ignored=ignored,
1406 clean=clean,
1403 clean=clean,
1407 )
1404 )
1408 return (lookup, status)
1405 return (lookup, status)
1409
1406
1410 def status(self, match, subrepos, ignored, clean, unknown):
1407 def status(self, match, subrepos, ignored, clean, unknown):
1411 """Determine the status of the working copy relative to the
1408 """Determine the status of the working copy relative to the
1412 dirstate and return a pair of (unsure, status), where status is of type
1409 dirstate and return a pair of (unsure, status), where status is of type
1413 scmutil.status and:
1410 scmutil.status and:
1414
1411
1415 unsure:
1412 unsure:
1416 files that might have been modified since the dirstate was
1413 files that might have been modified since the dirstate was
1417 written, but need to be read to be sure (size is the same
1414 written, but need to be read to be sure (size is the same
1418 but mtime differs)
1415 but mtime differs)
1419 status.modified:
1416 status.modified:
1420 files that have definitely been modified since the dirstate
1417 files that have definitely been modified since the dirstate
1421 was written (different size or mode)
1418 was written (different size or mode)
1422 status.clean:
1419 status.clean:
1423 files that have definitely not been modified since the
1420 files that have definitely not been modified since the
1424 dirstate was written
1421 dirstate was written
1425 """
1422 """
1426 listignored, listclean, listunknown = ignored, clean, unknown
1423 listignored, listclean, listunknown = ignored, clean, unknown
1427 lookup, modified, added, unknown, ignored = [], [], [], [], []
1424 lookup, modified, added, unknown, ignored = [], [], [], [], []
1428 removed, deleted, clean = [], [], []
1425 removed, deleted, clean = [], [], []
1429
1426
1430 dmap = self._map
1427 dmap = self._map
1431 dmap.preload()
1428 dmap.preload()
1432
1429
1433 use_rust = True
1430 use_rust = True
1434
1431
1435 allowed_matchers = (
1432 allowed_matchers = (
1436 matchmod.alwaysmatcher,
1433 matchmod.alwaysmatcher,
1437 matchmod.exactmatcher,
1434 matchmod.exactmatcher,
1438 matchmod.includematcher,
1435 matchmod.includematcher,
1439 )
1436 )
1440
1437
1441 if rustmod is None:
1438 if rustmod is None:
1442 use_rust = False
1439 use_rust = False
1443 elif self._checkcase:
1440 elif self._checkcase:
1444 # Case-insensitive filesystems are not handled yet
1441 # Case-insensitive filesystems are not handled yet
1445 use_rust = False
1442 use_rust = False
1446 elif subrepos:
1443 elif subrepos:
1447 use_rust = False
1444 use_rust = False
1448 elif sparse.enabled:
1445 elif sparse.enabled:
1449 use_rust = False
1446 use_rust = False
1450 elif not isinstance(match, allowed_matchers):
1447 elif not isinstance(match, allowed_matchers):
1451 # Some matchers have yet to be implemented
1448 # Some matchers have yet to be implemented
1452 use_rust = False
1449 use_rust = False
1453
1450
1454 if use_rust:
1451 if use_rust:
1455 try:
1452 try:
1456 return self._rust_status(
1453 return self._rust_status(
1457 match, listclean, listignored, listunknown
1454 match, listclean, listignored, listunknown
1458 )
1455 )
1459 except rustmod.FallbackError:
1456 except rustmod.FallbackError:
1460 pass
1457 pass
1461
1458
1462 def noop(f):
1459 def noop(f):
1463 pass
1460 pass
1464
1461
1465 dcontains = dmap.__contains__
1462 dcontains = dmap.__contains__
1466 dget = dmap.__getitem__
1463 dget = dmap.__getitem__
1467 ladd = lookup.append # aka "unsure"
1464 ladd = lookup.append # aka "unsure"
1468 madd = modified.append
1465 madd = modified.append
1469 aadd = added.append
1466 aadd = added.append
1470 uadd = unknown.append if listunknown else noop
1467 uadd = unknown.append if listunknown else noop
1471 iadd = ignored.append if listignored else noop
1468 iadd = ignored.append if listignored else noop
1472 radd = removed.append
1469 radd = removed.append
1473 dadd = deleted.append
1470 dadd = deleted.append
1474 cadd = clean.append if listclean else noop
1471 cadd = clean.append if listclean else noop
1475 mexact = match.exact
1472 mexact = match.exact
1476 dirignore = self._dirignore
1473 dirignore = self._dirignore
1477 checkexec = self._checkexec
1474 checkexec = self._checkexec
1478 copymap = self._map.copymap
1475 copymap = self._map.copymap
1479 lastnormaltime = self._lastnormaltime
1476 lastnormaltime = self._lastnormaltime
1480
1477
1481 # We need to do full walks when either
1478 # We need to do full walks when either
1482 # - we're listing all clean files, or
1479 # - we're listing all clean files, or
1483 # - match.traversedir does something, because match.traversedir should
1480 # - match.traversedir does something, because match.traversedir should
1484 # be called for every dir in the working dir
1481 # be called for every dir in the working dir
1485 full = listclean or match.traversedir is not None
1482 full = listclean or match.traversedir is not None
1486 for fn, st in pycompat.iteritems(
1483 for fn, st in pycompat.iteritems(
1487 self.walk(match, subrepos, listunknown, listignored, full=full)
1484 self.walk(match, subrepos, listunknown, listignored, full=full)
1488 ):
1485 ):
1489 if not dcontains(fn):
1486 if not dcontains(fn):
1490 if (listignored or mexact(fn)) and dirignore(fn):
1487 if (listignored or mexact(fn)) and dirignore(fn):
1491 if listignored:
1488 if listignored:
1492 iadd(fn)
1489 iadd(fn)
1493 else:
1490 else:
1494 uadd(fn)
1491 uadd(fn)
1495 continue
1492 continue
1496
1493
1497 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1494 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1498 # written like that for performance reasons. dmap[fn] is not a
1495 # written like that for performance reasons. dmap[fn] is not a
1499 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1496 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1500 # opcode has fast paths when the value to be unpacked is a tuple or
1497 # opcode has fast paths when the value to be unpacked is a tuple or
1501 # a list, but falls back to creating a full-fledged iterator in
1498 # a list, but falls back to creating a full-fledged iterator in
1502 # general. That is much slower than simply accessing and storing the
1499 # general. That is much slower than simply accessing and storing the
1503 # tuple members one by one.
1500 # tuple members one by one.
1504 t = dget(fn)
1501 t = dget(fn)
1505 mode = t.mode
1502 mode = t.mode
1506 size = t.size
1503 size = t.size
1507 time = t.mtime
1504 time = t.mtime
1508
1505
1509 if not st and t.tracked:
1506 if not st and t.tracked:
1510 dadd(fn)
1507 dadd(fn)
1511 elif t.merged:
1508 elif t.merged:
1512 madd(fn)
1509 madd(fn)
1513 elif t.added:
1510 elif t.added:
1514 aadd(fn)
1511 aadd(fn)
1515 elif t.removed:
1512 elif t.removed:
1516 radd(fn)
1513 radd(fn)
1517 elif t.tracked:
1514 elif t.tracked:
1518 if (
1515 if (
1519 size >= 0
1516 size >= 0
1520 and (
1517 and (
1521 (size != st.st_size and size != st.st_size & _rangemask)
1518 (size != st.st_size and size != st.st_size & _rangemask)
1522 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1519 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1523 )
1520 )
1524 or t.from_p2
1521 or t.from_p2
1525 or fn in copymap
1522 or fn in copymap
1526 ):
1523 ):
1527 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1524 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1528 # issue6456: Size returned may be longer due to
1525 # issue6456: Size returned may be longer due to
1529 # encryption on EXT-4 fscrypt, undecided.
1526 # encryption on EXT-4 fscrypt, undecided.
1530 ladd(fn)
1527 ladd(fn)
1531 else:
1528 else:
1532 madd(fn)
1529 madd(fn)
1533 elif (
1530 elif (
1534 time != st[stat.ST_MTIME]
1531 time != st[stat.ST_MTIME]
1535 and time != st[stat.ST_MTIME] & _rangemask
1532 and time != st[stat.ST_MTIME] & _rangemask
1536 ):
1533 ):
1537 ladd(fn)
1534 ladd(fn)
1538 elif st[stat.ST_MTIME] == lastnormaltime:
1535 elif st[stat.ST_MTIME] == lastnormaltime:
1539 # fn may have just been marked as normal and it may have
1536 # fn may have just been marked as normal and it may have
1540 # changed in the same second without changing its size.
1537 # changed in the same second without changing its size.
1541 # This can happen if we quickly do multiple commits.
1538 # This can happen if we quickly do multiple commits.
1542 # Force lookup, so we don't miss such a racy file change.
1539 # Force lookup, so we don't miss such a racy file change.
1543 ladd(fn)
1540 ladd(fn)
1544 elif listclean:
1541 elif listclean:
1545 cadd(fn)
1542 cadd(fn)
1546 status = scmutil.status(
1543 status = scmutil.status(
1547 modified, added, removed, deleted, unknown, ignored, clean
1544 modified, added, removed, deleted, unknown, ignored, clean
1548 )
1545 )
1549 return (lookup, status)
1546 return (lookup, status)
1550
1547
1551 def matches(self, match):
1548 def matches(self, match):
1552 """
1549 """
1553 return files in the dirstate (in whatever state) filtered by match
1550 return files in the dirstate (in whatever state) filtered by match
1554 """
1551 """
1555 dmap = self._map
1552 dmap = self._map
1556 if rustmod is not None:
1553 if rustmod is not None:
1557 dmap = self._map._rustmap
1554 dmap = self._map._rustmap
1558
1555
1559 if match.always():
1556 if match.always():
1560 return dmap.keys()
1557 return dmap.keys()
1561 files = match.files()
1558 files = match.files()
1562 if match.isexact():
1559 if match.isexact():
1563 # fast path -- filter the other way around, since typically files is
1560 # fast path -- filter the other way around, since typically files is
1564 # much smaller than dmap
1561 # much smaller than dmap
1565 return [f for f in files if f in dmap]
1562 return [f for f in files if f in dmap]
1566 if match.prefix() and all(fn in dmap for fn in files):
1563 if match.prefix() and all(fn in dmap for fn in files):
1567 # fast path -- all the values are known to be files, so just return
1564 # fast path -- all the values are known to be files, so just return
1568 # that
1565 # that
1569 return list(files)
1566 return list(files)
1570 return [f for f in dmap if match(f)]
1567 return [f for f in dmap if match(f)]
1571
1568
1572 def _actualfilename(self, tr):
1569 def _actualfilename(self, tr):
1573 if tr:
1570 if tr:
1574 return self._pendingfilename
1571 return self._pendingfilename
1575 else:
1572 else:
1576 return self._filename
1573 return self._filename
1577
1574
1578 def savebackup(self, tr, backupname):
1575 def savebackup(self, tr, backupname):
1579 '''Save current dirstate into backup file'''
1576 '''Save current dirstate into backup file'''
1580 filename = self._actualfilename(tr)
1577 filename = self._actualfilename(tr)
1581 assert backupname != filename
1578 assert backupname != filename
1582
1579
1583 # use '_writedirstate' instead of 'write' to write changes certainly,
1580 # use '_writedirstate' instead of 'write' to write changes certainly,
1584 # because the latter omits writing out if transaction is running.
1581 # because the latter omits writing out if transaction is running.
1585 # output file will be used to create backup of dirstate at this point.
1582 # output file will be used to create backup of dirstate at this point.
1586 if self._dirty or not self._opener.exists(filename):
1583 if self._dirty or not self._opener.exists(filename):
1587 self._writedirstate(
1584 self._writedirstate(
1588 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1585 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1589 )
1586 )
1590
1587
1591 if tr:
1588 if tr:
1592 # ensure that subsequent tr.writepending returns True for
1589 # ensure that subsequent tr.writepending returns True for
1593 # changes written out above, even if dirstate is never
1590 # changes written out above, even if dirstate is never
1594 # changed after this
1591 # changed after this
1595 tr.addfilegenerator(
1592 tr.addfilegenerator(
1596 b'dirstate',
1593 b'dirstate',
1597 (self._filename,),
1594 (self._filename,),
1598 self._writedirstate,
1595 self._writedirstate,
1599 location=b'plain',
1596 location=b'plain',
1600 )
1597 )
1601
1598
1602 # ensure that pending file written above is unlinked at
1599 # ensure that pending file written above is unlinked at
1603 # failure, even if tr.writepending isn't invoked until the
1600 # failure, even if tr.writepending isn't invoked until the
1604 # end of this transaction
1601 # end of this transaction
1605 tr.registertmp(filename, location=b'plain')
1602 tr.registertmp(filename, location=b'plain')
1606
1603
1607 self._opener.tryunlink(backupname)
1604 self._opener.tryunlink(backupname)
1608 # hardlink backup is okay because _writedirstate is always called
1605 # hardlink backup is okay because _writedirstate is always called
1609 # with an "atomictemp=True" file.
1606 # with an "atomictemp=True" file.
1610 util.copyfile(
1607 util.copyfile(
1611 self._opener.join(filename),
1608 self._opener.join(filename),
1612 self._opener.join(backupname),
1609 self._opener.join(backupname),
1613 hardlink=True,
1610 hardlink=True,
1614 )
1611 )
1615
1612
1616 def restorebackup(self, tr, backupname):
1613 def restorebackup(self, tr, backupname):
1617 '''Restore dirstate by backup file'''
1614 '''Restore dirstate by backup file'''
1618 # this "invalidate()" prevents "wlock.release()" from writing
1615 # this "invalidate()" prevents "wlock.release()" from writing
1619 # changes of dirstate out after restoring from backup file
1616 # changes of dirstate out after restoring from backup file
1620 self.invalidate()
1617 self.invalidate()
1621 filename = self._actualfilename(tr)
1618 filename = self._actualfilename(tr)
1622 o = self._opener
1619 o = self._opener
1623 if util.samefile(o.join(backupname), o.join(filename)):
1620 if util.samefile(o.join(backupname), o.join(filename)):
1624 o.unlink(backupname)
1621 o.unlink(backupname)
1625 else:
1622 else:
1626 o.rename(backupname, filename, checkambig=True)
1623 o.rename(backupname, filename, checkambig=True)
1627
1624
1628 def clearbackup(self, tr, backupname):
1625 def clearbackup(self, tr, backupname):
1629 '''Clear backup file'''
1626 '''Clear backup file'''
1630 self._opener.unlink(backupname)
1627 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now