##// END OF EJS Templates
dirstate: extract the logic to check file/dirname collision when adding a file...
marmoute -
r48787:2e0ff394 default
parent child Browse files
Show More
@@ -1,1612 +1,1615 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
# Low-level dirstate parser implementations. `parsers` resolves to the C or
# pure-Python implementation depending on policy; `rustmod` is the optional
# Rust implementation (None when the Rust extensions are unavailable).
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# dirstate-v2 currently requires the Rust extensions.
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# Local aliases for frequently-used helpers.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # Resolve fname relative to the repository's .hg/ directory.
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # Resolve fname relative to the working-directory root.
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator: require that the dirstate is inside a `parentchange` context.

    Raises error.ProgrammingError when the wrapped method is called outside
    of a `with dirstate.parentchange():` block.
    """
    # local import keeps this block self-contained; stdlib only
    import functools

    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator: require that the dirstate is NOT inside a `parentchange` context.

    Raises error.ProgrammingError when the wrapped method is called inside
    of a `with dirstate.parentchange():` block.
    """
    # local import keeps this block self-contained; stdlib only
    import functools

    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._updatedfiles = set()
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
242 if self._checklink and self._checkexec:
243
243
244 def f(x):
244 def f(x):
245 try:
245 try:
246 st = os.lstat(self._join(x))
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
247 if util.statislink(st):
248 return b'l'
248 return b'l'
249 if util.statisexec(st):
249 if util.statisexec(st):
250 return b'x'
250 return b'x'
251 except OSError:
251 except OSError:
252 pass
252 pass
253 return b''
253 return b''
254
254
255 return f
255 return f
256
256
257 fallback = buildfallback()
257 fallback = buildfallback()
258 if self._checklink:
258 if self._checklink:
259
259
260 def f(x):
260 def f(x):
261 if os.path.islink(self._join(x)):
261 if os.path.islink(self._join(x)):
262 return b'l'
262 return b'l'
263 if b'x' in fallback(x):
263 if b'x' in fallback(x):
264 return b'x'
264 return b'x'
265 return b''
265 return b''
266
266
267 return f
267 return f
268 if self._checkexec:
268 if self._checkexec:
269
269
270 def f(x):
270 def f(x):
271 if b'l' in fallback(x):
271 if b'l' in fallback(x):
272 return b'l'
272 return b'l'
273 if util.isexec(self._join(x)):
273 if util.isexec(self._join(x)):
274 return b'x'
274 return b'x'
275 return b''
275 return b''
276
276
277 return f
277 return f
278 else:
278 else:
279 return fallback
279 return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries a
370 When moving from two parents to one, "merged" entries a
371 adjusted to normal and previous copy records discarded and
371 adjusted to normal and previous copy records discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 self._map.setparents(p1, p2)
388 self._map.setparents(p1, p2)
389 copies = {}
389 copies = {}
390 if (
390 if (
391 oldp2 != self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
395
396 for f in candidatefiles:
396 for f in candidatefiles:
397 s = self._map.get(f)
397 s = self._map.get(f)
398 if s is None:
398 if s is None:
399 continue
399 continue
400
400
401 # Discard "merged" markers when moving away from a merge state
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
402 if s.merged:
403 source = self._map.copymap.get(f)
403 source = self._map.copymap.get(f)
404 if source:
404 if source:
405 copies[f] = source
405 copies[f] = source
406 self._normallookup(f)
406 self._normallookup(f)
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._add(f)
413 return copies
413 return copies
414
414
415 def setbranch(self, branch):
415 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 try:
418 try:
419 f.write(self._branch + b'\n')
419 f.write(self._branch + b'\n')
420 f.close()
420 f.close()
421
421
422 # make sure filecache has the correct stat info for _branch after
422 # make sure filecache has the correct stat info for _branch after
423 # replacing the underlying file
423 # replacing the underlying file
424 ce = self._filecache[b'_branch']
424 ce = self._filecache[b'_branch']
425 if ce:
425 if ce:
426 ce.refresh()
426 ce.refresh()
427 except: # re-raises
427 except: # re-raises
428 f.discard()
428 f.discard()
429 raise
429 raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of "update/merge" case. For
469 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True the file was previously untracked, False otherwise.
472 return True the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self._normallookup(filename)
479 self._normallookup(filename)
480 return True
480 return True
481 # XXX This is probably overkill for more case, but we need this to
481 # XXX This is probably overkill for more case, but we need this to
482 # fully replace the `normallookup` call with `set_tracked` one.
482 # fully replace the `normallookup` call with `set_tracked` one.
483 # Consider smoothing this in the future.
483 # Consider smoothing this in the future.
484 self.set_possibly_dirty(filename)
484 self.set_possibly_dirty(filename)
485 return False
485 return False
486
486
487 @requires_no_parents_change
487 @requires_no_parents_change
488 def set_untracked(self, filename):
488 def set_untracked(self, filename):
489 """a "public" method for generic code to mark a file as untracked
489 """a "public" method for generic code to mark a file as untracked
490
490
491 This function is to be called outside of "update/merge" case. For
491 This function is to be called outside of "update/merge" case. For
492 example by a command like `hg remove X`.
492 example by a command like `hg remove X`.
493
493
494 return True the file was previously tracked, False otherwise.
494 return True the file was previously tracked, False otherwise.
495 """
495 """
496 ret = self._map.set_untracked(filename)
496 ret = self._map.set_untracked(filename)
497 if ret:
497 if ret:
498 self._dirty = True
498 self._dirty = True
499 self._updatedfiles.add(filename)
499 self._updatedfiles.add(filename)
500 return ret
500 return ret
501
501
502 @requires_no_parents_change
502 @requires_no_parents_change
503 def set_clean(self, filename, parentfiledata=None):
503 def set_clean(self, filename, parentfiledata=None):
504 """record that the current state of the file on disk is known to be clean"""
504 """record that the current state of the file on disk is known to be clean"""
505 self._dirty = True
505 self._dirty = True
506 self._updatedfiles.add(filename)
506 self._updatedfiles.add(filename)
507 if parentfiledata:
507 if parentfiledata:
508 (mode, size, mtime) = parentfiledata
508 (mode, size, mtime) = parentfiledata
509 else:
509 else:
510 (mode, size, mtime) = self._get_filedata(filename)
510 (mode, size, mtime) = self._get_filedata(filename)
511 self._addpath(filename, mode=mode, size=size, mtime=mtime)
511 self._addpath(filename, mode=mode, size=size, mtime=mtime)
512 self._map.copymap.pop(filename, None)
512 self._map.copymap.pop(filename, None)
513 if filename in self._map.nonnormalset:
513 if filename in self._map.nonnormalset:
514 self._map.nonnormalset.remove(filename)
514 self._map.nonnormalset.remove(filename)
515 if mtime > self._lastnormaltime:
515 if mtime > self._lastnormaltime:
516 # Remember the most recent modification timeslot for status(),
516 # Remember the most recent modification timeslot for status(),
517 # to make sure we won't miss future size-preserving file content
517 # to make sure we won't miss future size-preserving file content
518 # modifications that happen within the same timeslot.
518 # modifications that happen within the same timeslot.
519 self._lastnormaltime = mtime
519 self._lastnormaltime = mtime
520
520
521 @requires_no_parents_change
521 @requires_no_parents_change
522 def set_possibly_dirty(self, filename):
522 def set_possibly_dirty(self, filename):
523 """record that the current state of the file on disk is unknown"""
523 """record that the current state of the file on disk is unknown"""
524 self._dirty = True
524 self._dirty = True
525 self._updatedfiles.add(filename)
525 self._updatedfiles.add(filename)
526 self._map.set_possibly_dirty(filename)
526 self._map.set_possibly_dirty(filename)
527
527
528 @requires_parents_change
528 @requires_parents_change
529 def update_file_p1(
529 def update_file_p1(
530 self,
530 self,
531 filename,
531 filename,
532 p1_tracked,
532 p1_tracked,
533 ):
533 ):
534 """Set a file as tracked in the parent (or not)
534 """Set a file as tracked in the parent (or not)
535
535
536 This is to be called when adjust the dirstate to a new parent after an history
536 This is to be called when adjust the dirstate to a new parent after an history
537 rewriting operation.
537 rewriting operation.
538
538
539 It should not be called during a merge (p2 != nullid) and only within
539 It should not be called during a merge (p2 != nullid) and only within
540 a `with dirstate.parentchange():` context.
540 a `with dirstate.parentchange():` context.
541 """
541 """
542 if self.in_merge:
542 if self.in_merge:
543 msg = b'update_file_reference should not be called when merging'
543 msg = b'update_file_reference should not be called when merging'
544 raise error.ProgrammingError(msg)
544 raise error.ProgrammingError(msg)
545 entry = self._map.get(filename)
545 entry = self._map.get(filename)
546 if entry is None:
546 if entry is None:
547 wc_tracked = False
547 wc_tracked = False
548 else:
548 else:
549 wc_tracked = entry.tracked
549 wc_tracked = entry.tracked
550 possibly_dirty = False
550 possibly_dirty = False
551 if p1_tracked and wc_tracked:
551 if p1_tracked and wc_tracked:
552 # the underlying reference might have changed, we will have to
552 # the underlying reference might have changed, we will have to
553 # check it.
553 # check it.
554 possibly_dirty = True
554 possibly_dirty = True
555 elif not (p1_tracked or wc_tracked):
555 elif not (p1_tracked or wc_tracked):
556 # the file is no longer relevant to anyone
556 # the file is no longer relevant to anyone
557 self._drop(filename)
557 self._drop(filename)
558 elif (not p1_tracked) and wc_tracked:
558 elif (not p1_tracked) and wc_tracked:
559 if entry is not None and entry.added:
559 if entry is not None and entry.added:
560 return # avoid dropping copy information (maybe?)
560 return # avoid dropping copy information (maybe?)
561 elif p1_tracked and not wc_tracked:
561 elif p1_tracked and not wc_tracked:
562 pass
562 pass
563 else:
563 else:
564 assert False, 'unreachable'
564 assert False, 'unreachable'
565
565
566 # this mean we are doing call for file we do not really care about the
566 # this mean we are doing call for file we do not really care about the
567 # data (eg: added or removed), however this should be a minor overhead
567 # data (eg: added or removed), however this should be a minor overhead
568 # compared to the overall update process calling this.
568 # compared to the overall update process calling this.
569 parentfiledata = None
569 parentfiledata = None
570 if wc_tracked:
570 if wc_tracked:
571 parentfiledata = self._get_filedata(filename)
571 parentfiledata = self._get_filedata(filename)
572
572
573 self._updatedfiles.add(filename)
573 self._updatedfiles.add(filename)
574 self._map.reset_state(
574 self._map.reset_state(
575 filename,
575 filename,
576 wc_tracked,
576 wc_tracked,
577 p1_tracked,
577 p1_tracked,
578 possibly_dirty=possibly_dirty,
578 possibly_dirty=possibly_dirty,
579 parentfiledata=parentfiledata,
579 parentfiledata=parentfiledata,
580 )
580 )
581 if (
581 if (
582 parentfiledata is not None
582 parentfiledata is not None
583 and parentfiledata[2] > self._lastnormaltime
583 and parentfiledata[2] > self._lastnormaltime
584 ):
584 ):
585 # Remember the most recent modification timeslot for status(),
585 # Remember the most recent modification timeslot for status(),
586 # to make sure we won't miss future size-preserving file content
586 # to make sure we won't miss future size-preserving file content
587 # modifications that happen within the same timeslot.
587 # modifications that happen within the same timeslot.
588 self._lastnormaltime = parentfiledata[2]
588 self._lastnormaltime = parentfiledata[2]
589
589
590 @requires_parents_change
590 @requires_parents_change
591 def update_file(
591 def update_file(
592 self,
592 self,
593 filename,
593 filename,
594 wc_tracked,
594 wc_tracked,
595 p1_tracked,
595 p1_tracked,
596 p2_tracked=False,
596 p2_tracked=False,
597 merged=False,
597 merged=False,
598 clean_p1=False,
598 clean_p1=False,
599 clean_p2=False,
599 clean_p2=False,
600 possibly_dirty=False,
600 possibly_dirty=False,
601 parentfiledata=None,
601 parentfiledata=None,
602 ):
602 ):
603 """update the information about a file in the dirstate
603 """update the information about a file in the dirstate
604
604
605 This is to be called when the direstates parent changes to keep track
605 This is to be called when the direstates parent changes to keep track
606 of what is the file situation in regards to the working copy and its parent.
606 of what is the file situation in regards to the working copy and its parent.
607
607
608 This function must be called within a `dirstate.parentchange` context.
608 This function must be called within a `dirstate.parentchange` context.
609
609
610 note: the API is at an early stage and we might need to adjust it
610 note: the API is at an early stage and we might need to adjust it
611 depending of what information ends up being relevant and useful to
611 depending of what information ends up being relevant and useful to
612 other processing.
612 other processing.
613 """
613 """
614 if merged and (clean_p1 or clean_p2):
614 if merged and (clean_p1 or clean_p2):
615 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
615 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
616 raise error.ProgrammingError(msg)
616 raise error.ProgrammingError(msg)
617
617
618 # note: I do not think we need to double check name clash here since we
618 # note: I do not think we need to double check name clash here since we
619 # are in a update/merge case that should already have taken care of
619 # are in a update/merge case that should already have taken care of
620 # this. The test agrees
620 # this. The test agrees
621
621
622 self._dirty = True
622 self._dirty = True
623 self._updatedfiles.add(filename)
623 self._updatedfiles.add(filename)
624
624
625 need_parent_file_data = (
625 need_parent_file_data = (
626 not (possibly_dirty or clean_p2 or merged)
626 not (possibly_dirty or clean_p2 or merged)
627 and wc_tracked
627 and wc_tracked
628 and p1_tracked
628 and p1_tracked
629 )
629 )
630
630
631 # this mean we are doing call for file we do not really care about the
631 # this mean we are doing call for file we do not really care about the
632 # data (eg: added or removed), however this should be a minor overhead
632 # data (eg: added or removed), however this should be a minor overhead
633 # compared to the overall update process calling this.
633 # compared to the overall update process calling this.
634 if need_parent_file_data:
634 if need_parent_file_data:
635 if parentfiledata is None:
635 if parentfiledata is None:
636 parentfiledata = self._get_filedata(filename)
636 parentfiledata = self._get_filedata(filename)
637 mtime = parentfiledata[2]
637 mtime = parentfiledata[2]
638
638
639 if mtime > self._lastnormaltime:
639 if mtime > self._lastnormaltime:
640 # Remember the most recent modification timeslot for
640 # Remember the most recent modification timeslot for
641 # status(), to make sure we won't miss future
641 # status(), to make sure we won't miss future
642 # size-preserving file content modifications that happen
642 # size-preserving file content modifications that happen
643 # within the same timeslot.
643 # within the same timeslot.
644 self._lastnormaltime = mtime
644 self._lastnormaltime = mtime
645
645
646 self._map.reset_state(
646 self._map.reset_state(
647 filename,
647 filename,
648 wc_tracked,
648 wc_tracked,
649 p1_tracked,
649 p1_tracked,
650 p2_tracked=p2_tracked,
650 p2_tracked=p2_tracked,
651 merged=merged,
651 merged=merged,
652 clean_p1=clean_p1,
652 clean_p1=clean_p1,
653 clean_p2=clean_p2,
653 clean_p2=clean_p2,
654 possibly_dirty=possibly_dirty,
654 possibly_dirty=possibly_dirty,
655 parentfiledata=parentfiledata,
655 parentfiledata=parentfiledata,
656 )
656 )
657 if (
657 if (
658 parentfiledata is not None
658 parentfiledata is not None
659 and parentfiledata[2] > self._lastnormaltime
659 and parentfiledata[2] > self._lastnormaltime
660 ):
660 ):
661 # Remember the most recent modification timeslot for status(),
661 # Remember the most recent modification timeslot for status(),
662 # to make sure we won't miss future size-preserving file content
662 # to make sure we won't miss future size-preserving file content
663 # modifications that happen within the same timeslot.
663 # modifications that happen within the same timeslot.
664 self._lastnormaltime = parentfiledata[2]
664 self._lastnormaltime = parentfiledata[2]
665
665
666 def _addpath(
666 def _addpath(
667 self,
667 self,
668 f,
668 f,
669 mode=0,
669 mode=0,
670 size=None,
670 size=None,
671 mtime=None,
671 mtime=None,
672 added=False,
672 added=False,
673 merged=False,
673 merged=False,
674 from_p2=False,
674 from_p2=False,
675 possibly_dirty=False,
675 possibly_dirty=False,
676 ):
676 ):
677 entry = self._map.get(f)
677 entry = self._map.get(f)
678 if added or entry is not None and entry.removed:
678 if added or entry is not None and entry.removed:
679 scmutil.checkfilename(f)
679 self._check_new_tracked_filename(f)
680 if self._map.hastrackeddir(f):
681 msg = _(b'directory %r already in dirstate')
682 msg %= pycompat.bytestr(f)
683 raise error.Abort(msg)
684 # shadows
685 for d in pathutil.finddirs(f):
686 if self._map.hastrackeddir(d):
687 break
688 entry = self._map.get(d)
689 if entry is not None and not entry.removed:
690 msg = _(b'file %r in dirstate clashes with %r')
691 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
692 raise error.Abort(msg)
693 self._dirty = True
680 self._dirty = True
694 self._updatedfiles.add(f)
681 self._updatedfiles.add(f)
695 self._map.addfile(
682 self._map.addfile(
696 f,
683 f,
697 mode=mode,
684 mode=mode,
698 size=size,
685 size=size,
699 mtime=mtime,
686 mtime=mtime,
700 added=added,
687 added=added,
701 merged=merged,
688 merged=merged,
702 from_p2=from_p2,
689 from_p2=from_p2,
703 possibly_dirty=possibly_dirty,
690 possibly_dirty=possibly_dirty,
704 )
691 )
705
692
693 def _check_new_tracked_filename(self, filename):
694 scmutil.checkfilename(filename)
695 if self._map.hastrackeddir(filename):
696 msg = _(b'directory %r already in dirstate')
697 msg %= pycompat.bytestr(filename)
698 raise error.Abort(msg)
699 # shadows
700 for d in pathutil.finddirs(filename):
701 if self._map.hastrackeddir(d):
702 break
703 entry = self._map.get(d)
704 if entry is not None and not entry.removed:
705 msg = _(b'file %r in dirstate clashes with %r')
706 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
707 raise error.Abort(msg)
708
706 def _get_filedata(self, filename):
709 def _get_filedata(self, filename):
707 """returns"""
710 """returns"""
708 s = os.lstat(self._join(filename))
711 s = os.lstat(self._join(filename))
709 mode = s.st_mode
712 mode = s.st_mode
710 size = s.st_size
713 size = s.st_size
711 mtime = s[stat.ST_MTIME]
714 mtime = s[stat.ST_MTIME]
712 return (mode, size, mtime)
715 return (mode, size, mtime)
713
716
714 def _normallookup(self, f):
717 def _normallookup(self, f):
715 '''Mark a file normal, but possibly dirty.'''
718 '''Mark a file normal, but possibly dirty.'''
716 if self.in_merge:
719 if self.in_merge:
717 # if there is a merge going on and the file was either
720 # if there is a merge going on and the file was either
718 # "merged" or coming from other parent (-2) before
721 # "merged" or coming from other parent (-2) before
719 # being removed, restore that state.
722 # being removed, restore that state.
720 entry = self._map.get(f)
723 entry = self._map.get(f)
721 if entry is not None:
724 if entry is not None:
722 # XXX this should probably be dealt with a a lower level
725 # XXX this should probably be dealt with a a lower level
723 # (see `merged_removed` and `from_p2_removed`)
726 # (see `merged_removed` and `from_p2_removed`)
724 if entry.merged_removed or entry.from_p2_removed:
727 if entry.merged_removed or entry.from_p2_removed:
725 source = self._map.copymap.get(f)
728 source = self._map.copymap.get(f)
726 self._addpath(f, from_p2=True)
729 self._addpath(f, from_p2=True)
727 self._map.copymap.pop(f, None)
730 self._map.copymap.pop(f, None)
728 if source is not None:
731 if source is not None:
729 self.copy(source, f)
732 self.copy(source, f)
730 return
733 return
731 elif entry.merged or entry.from_p2:
734 elif entry.merged or entry.from_p2:
732 return
735 return
733 self._addpath(f, possibly_dirty=True)
736 self._addpath(f, possibly_dirty=True)
734 self._map.copymap.pop(f, None)
737 self._map.copymap.pop(f, None)
735
738
736 def _add(self, filename):
739 def _add(self, filename):
737 """internal function to mark a file as added"""
740 """internal function to mark a file as added"""
738 self._addpath(filename, added=True)
741 self._addpath(filename, added=True)
739 self._map.copymap.pop(filename, None)
742 self._map.copymap.pop(filename, None)
740
743
741 def _drop(self, filename):
744 def _drop(self, filename):
742 """internal function to drop a file from the dirstate"""
745 """internal function to drop a file from the dirstate"""
743 if self._map.dropfile(filename):
746 if self._map.dropfile(filename):
744 self._dirty = True
747 self._dirty = True
745 self._updatedfiles.add(filename)
748 self._updatedfiles.add(filename)
746
749
747 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
750 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
748 if exists is None:
751 if exists is None:
749 exists = os.path.lexists(os.path.join(self._root, path))
752 exists = os.path.lexists(os.path.join(self._root, path))
750 if not exists:
753 if not exists:
751 # Maybe a path component exists
754 # Maybe a path component exists
752 if not ignoremissing and b'/' in path:
755 if not ignoremissing and b'/' in path:
753 d, f = path.rsplit(b'/', 1)
756 d, f = path.rsplit(b'/', 1)
754 d = self._normalize(d, False, ignoremissing, None)
757 d = self._normalize(d, False, ignoremissing, None)
755 folded = d + b"/" + f
758 folded = d + b"/" + f
756 else:
759 else:
757 # No path components, preserve original case
760 # No path components, preserve original case
758 folded = path
761 folded = path
759 else:
762 else:
760 # recursively normalize leading directory components
763 # recursively normalize leading directory components
761 # against dirstate
764 # against dirstate
762 if b'/' in normed:
765 if b'/' in normed:
763 d, f = normed.rsplit(b'/', 1)
766 d, f = normed.rsplit(b'/', 1)
764 d = self._normalize(d, False, ignoremissing, True)
767 d = self._normalize(d, False, ignoremissing, True)
765 r = self._root + b"/" + d
768 r = self._root + b"/" + d
766 folded = d + b"/" + util.fspath(f, r)
769 folded = d + b"/" + util.fspath(f, r)
767 else:
770 else:
768 folded = util.fspath(normed, self._root)
771 folded = util.fspath(normed, self._root)
769 storemap[normed] = folded
772 storemap[normed] = folded
770
773
771 return folded
774 return folded
772
775
773 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
776 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
774 normed = util.normcase(path)
777 normed = util.normcase(path)
775 folded = self._map.filefoldmap.get(normed, None)
778 folded = self._map.filefoldmap.get(normed, None)
776 if folded is None:
779 if folded is None:
777 if isknown:
780 if isknown:
778 folded = path
781 folded = path
779 else:
782 else:
780 folded = self._discoverpath(
783 folded = self._discoverpath(
781 path, normed, ignoremissing, exists, self._map.filefoldmap
784 path, normed, ignoremissing, exists, self._map.filefoldmap
782 )
785 )
783 return folded
786 return folded
784
787
785 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
788 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
786 normed = util.normcase(path)
789 normed = util.normcase(path)
787 folded = self._map.filefoldmap.get(normed, None)
790 folded = self._map.filefoldmap.get(normed, None)
788 if folded is None:
791 if folded is None:
789 folded = self._map.dirfoldmap.get(normed, None)
792 folded = self._map.dirfoldmap.get(normed, None)
790 if folded is None:
793 if folded is None:
791 if isknown:
794 if isknown:
792 folded = path
795 folded = path
793 else:
796 else:
794 # store discovered result in dirfoldmap so that future
797 # store discovered result in dirfoldmap so that future
795 # normalizefile calls don't start matching directories
798 # normalizefile calls don't start matching directories
796 folded = self._discoverpath(
799 folded = self._discoverpath(
797 path, normed, ignoremissing, exists, self._map.dirfoldmap
800 path, normed, ignoremissing, exists, self._map.dirfoldmap
798 )
801 )
799 return folded
802 return folded
800
803
801 def normalize(self, path, isknown=False, ignoremissing=False):
804 def normalize(self, path, isknown=False, ignoremissing=False):
802 """
805 """
803 normalize the case of a pathname when on a casefolding filesystem
806 normalize the case of a pathname when on a casefolding filesystem
804
807
805 isknown specifies whether the filename came from walking the
808 isknown specifies whether the filename came from walking the
806 disk, to avoid extra filesystem access.
809 disk, to avoid extra filesystem access.
807
810
808 If ignoremissing is True, missing path are returned
811 If ignoremissing is True, missing path are returned
809 unchanged. Otherwise, we try harder to normalize possibly
812 unchanged. Otherwise, we try harder to normalize possibly
810 existing path components.
813 existing path components.
811
814
812 The normalized case is determined based on the following precedence:
815 The normalized case is determined based on the following precedence:
813
816
814 - version of name already stored in the dirstate
817 - version of name already stored in the dirstate
815 - version of name stored on disk
818 - version of name stored on disk
816 - version provided via command arguments
819 - version provided via command arguments
817 """
820 """
818
821
819 if self._checkcase:
822 if self._checkcase:
820 return self._normalize(path, isknown, ignoremissing)
823 return self._normalize(path, isknown, ignoremissing)
821 return path
824 return path
822
825
823 def clear(self):
826 def clear(self):
824 self._map.clear()
827 self._map.clear()
825 self._lastnormaltime = 0
828 self._lastnormaltime = 0
826 self._updatedfiles.clear()
829 self._updatedfiles.clear()
827 self._dirty = True
830 self._dirty = True
828
831
829 def rebuild(self, parent, allfiles, changedfiles=None):
832 def rebuild(self, parent, allfiles, changedfiles=None):
830 if changedfiles is None:
833 if changedfiles is None:
831 # Rebuild entire dirstate
834 # Rebuild entire dirstate
832 to_lookup = allfiles
835 to_lookup = allfiles
833 to_drop = []
836 to_drop = []
834 lastnormaltime = self._lastnormaltime
837 lastnormaltime = self._lastnormaltime
835 self.clear()
838 self.clear()
836 self._lastnormaltime = lastnormaltime
839 self._lastnormaltime = lastnormaltime
837 elif len(changedfiles) < 10:
840 elif len(changedfiles) < 10:
838 # Avoid turning allfiles into a set, which can be expensive if it's
841 # Avoid turning allfiles into a set, which can be expensive if it's
839 # large.
842 # large.
840 to_lookup = []
843 to_lookup = []
841 to_drop = []
844 to_drop = []
842 for f in changedfiles:
845 for f in changedfiles:
843 if f in allfiles:
846 if f in allfiles:
844 to_lookup.append(f)
847 to_lookup.append(f)
845 else:
848 else:
846 to_drop.append(f)
849 to_drop.append(f)
847 else:
850 else:
848 changedfilesset = set(changedfiles)
851 changedfilesset = set(changedfiles)
849 to_lookup = changedfilesset & set(allfiles)
852 to_lookup = changedfilesset & set(allfiles)
850 to_drop = changedfilesset - to_lookup
853 to_drop = changedfilesset - to_lookup
851
854
852 if self._origpl is None:
855 if self._origpl is None:
853 self._origpl = self._pl
856 self._origpl = self._pl
854 self._map.setparents(parent, self._nodeconstants.nullid)
857 self._map.setparents(parent, self._nodeconstants.nullid)
855
858
856 for f in to_lookup:
859 for f in to_lookup:
857 self._normallookup(f)
860 self._normallookup(f)
858 for f in to_drop:
861 for f in to_drop:
859 self._drop(f)
862 self._drop(f)
860
863
861 self._dirty = True
864 self._dirty = True
862
865
863 def identity(self):
866 def identity(self):
864 """Return identity of dirstate itself to detect changing in storage
867 """Return identity of dirstate itself to detect changing in storage
865
868
866 If identity of previous dirstate is equal to this, writing
869 If identity of previous dirstate is equal to this, writing
867 changes based on the former dirstate out can keep consistency.
870 changes based on the former dirstate out can keep consistency.
868 """
871 """
869 return self._map.identity
872 return self._map.identity
870
873
871 def write(self, tr):
874 def write(self, tr):
872 if not self._dirty:
875 if not self._dirty:
873 return
876 return
874
877
875 filename = self._filename
878 filename = self._filename
876 if tr:
879 if tr:
877 # 'dirstate.write()' is not only for writing in-memory
880 # 'dirstate.write()' is not only for writing in-memory
878 # changes out, but also for dropping ambiguous timestamp.
881 # changes out, but also for dropping ambiguous timestamp.
879 # delayed writing re-raise "ambiguous timestamp issue".
882 # delayed writing re-raise "ambiguous timestamp issue".
880 # See also the wiki page below for detail:
883 # See also the wiki page below for detail:
881 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
884 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
882
885
883 # emulate dropping timestamp in 'parsers.pack_dirstate'
886 # emulate dropping timestamp in 'parsers.pack_dirstate'
884 now = _getfsnow(self._opener)
887 now = _getfsnow(self._opener)
885 self._map.clearambiguoustimes(self._updatedfiles, now)
888 self._map.clearambiguoustimes(self._updatedfiles, now)
886
889
887 # emulate that all 'dirstate.normal' results are written out
890 # emulate that all 'dirstate.normal' results are written out
888 self._lastnormaltime = 0
891 self._lastnormaltime = 0
889 self._updatedfiles.clear()
892 self._updatedfiles.clear()
890
893
891 # delay writing in-memory changes out
894 # delay writing in-memory changes out
892 tr.addfilegenerator(
895 tr.addfilegenerator(
893 b'dirstate',
896 b'dirstate',
894 (self._filename,),
897 (self._filename,),
895 lambda f: self._writedirstate(tr, f),
898 lambda f: self._writedirstate(tr, f),
896 location=b'plain',
899 location=b'plain',
897 )
900 )
898 return
901 return
899
902
900 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
903 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
901 self._writedirstate(tr, st)
904 self._writedirstate(tr, st)
902
905
903 def addparentchangecallback(self, category, callback):
906 def addparentchangecallback(self, category, callback):
904 """add a callback to be called when the wd parents are changed
907 """add a callback to be called when the wd parents are changed
905
908
906 Callback will be called with the following arguments:
909 Callback will be called with the following arguments:
907 dirstate, (oldp1, oldp2), (newp1, newp2)
910 dirstate, (oldp1, oldp2), (newp1, newp2)
908
911
909 Category is a unique identifier to allow overwriting an old callback
912 Category is a unique identifier to allow overwriting an old callback
910 with a newer callback.
913 with a newer callback.
911 """
914 """
912 self._plchangecallbacks[category] = callback
915 self._plchangecallbacks[category] = callback
913
916
914 def _writedirstate(self, tr, st):
917 def _writedirstate(self, tr, st):
915 # notify callbacks about parents change
918 # notify callbacks about parents change
916 if self._origpl is not None and self._origpl != self._pl:
919 if self._origpl is not None and self._origpl != self._pl:
917 for c, callback in sorted(
920 for c, callback in sorted(
918 pycompat.iteritems(self._plchangecallbacks)
921 pycompat.iteritems(self._plchangecallbacks)
919 ):
922 ):
920 callback(self, self._origpl, self._pl)
923 callback(self, self._origpl, self._pl)
921 self._origpl = None
924 self._origpl = None
922 # use the modification time of the newly created temporary file as the
925 # use the modification time of the newly created temporary file as the
923 # filesystem's notion of 'now'
926 # filesystem's notion of 'now'
924 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
927 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
925
928
926 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
929 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
927 # timestamp of each entries in dirstate, because of 'now > mtime'
930 # timestamp of each entries in dirstate, because of 'now > mtime'
928 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
931 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
929 if delaywrite > 0:
932 if delaywrite > 0:
930 # do we have any files to delay for?
933 # do we have any files to delay for?
931 for f, e in pycompat.iteritems(self._map):
934 for f, e in pycompat.iteritems(self._map):
932 if e.need_delay(now):
935 if e.need_delay(now):
933 import time # to avoid useless import
936 import time # to avoid useless import
934
937
935 # rather than sleep n seconds, sleep until the next
938 # rather than sleep n seconds, sleep until the next
936 # multiple of n seconds
939 # multiple of n seconds
937 clock = time.time()
940 clock = time.time()
938 start = int(clock) - (int(clock) % delaywrite)
941 start = int(clock) - (int(clock) % delaywrite)
939 end = start + delaywrite
942 end = start + delaywrite
940 time.sleep(end - clock)
943 time.sleep(end - clock)
941 now = end # trust our estimate that the end is near now
944 now = end # trust our estimate that the end is near now
942 break
945 break
943
946
944 self._map.write(tr, st, now)
947 self._map.write(tr, st, now)
945 self._lastnormaltime = 0
948 self._lastnormaltime = 0
946 self._dirty = False
949 self._dirty = False
947
950
948 def _dirignore(self, f):
951 def _dirignore(self, f):
949 if self._ignore(f):
952 if self._ignore(f):
950 return True
953 return True
951 for p in pathutil.finddirs(f):
954 for p in pathutil.finddirs(f):
952 if self._ignore(p):
955 if self._ignore(p):
953 return True
956 return True
954 return False
957 return False
955
958
956 def _ignorefiles(self):
959 def _ignorefiles(self):
957 files = []
960 files = []
958 if os.path.exists(self._join(b'.hgignore')):
961 if os.path.exists(self._join(b'.hgignore')):
959 files.append(self._join(b'.hgignore'))
962 files.append(self._join(b'.hgignore'))
960 for name, path in self._ui.configitems(b"ui"):
963 for name, path in self._ui.configitems(b"ui"):
961 if name == b'ignore' or name.startswith(b'ignore.'):
964 if name == b'ignore' or name.startswith(b'ignore.'):
962 # we need to use os.path.join here rather than self._join
965 # we need to use os.path.join here rather than self._join
963 # because path is arbitrary and user-specified
966 # because path is arbitrary and user-specified
964 files.append(os.path.join(self._rootdir, util.expandpath(path)))
967 files.append(os.path.join(self._rootdir, util.expandpath(path)))
965 return files
968 return files
966
969
967 def _ignorefileandline(self, f):
970 def _ignorefileandline(self, f):
968 files = collections.deque(self._ignorefiles())
971 files = collections.deque(self._ignorefiles())
969 visited = set()
972 visited = set()
970 while files:
973 while files:
971 i = files.popleft()
974 i = files.popleft()
972 patterns = matchmod.readpatternfile(
975 patterns = matchmod.readpatternfile(
973 i, self._ui.warn, sourceinfo=True
976 i, self._ui.warn, sourceinfo=True
974 )
977 )
975 for pattern, lineno, line in patterns:
978 for pattern, lineno, line in patterns:
976 kind, p = matchmod._patsplit(pattern, b'glob')
979 kind, p = matchmod._patsplit(pattern, b'glob')
977 if kind == b"subinclude":
980 if kind == b"subinclude":
978 if p not in visited:
981 if p not in visited:
979 files.append(p)
982 files.append(p)
980 continue
983 continue
981 m = matchmod.match(
984 m = matchmod.match(
982 self._root, b'', [], [pattern], warn=self._ui.warn
985 self._root, b'', [], [pattern], warn=self._ui.warn
983 )
986 )
984 if m(f):
987 if m(f):
985 return (i, lineno, line)
988 return (i, lineno, line)
986 visited.add(i)
989 visited.add(i)
987 return (None, -1, b"")
990 return (None, -1, b"")
988
991
989 def _walkexplicit(self, match, subrepos):
992 def _walkexplicit(self, match, subrepos):
990 """Get stat data about the files explicitly specified by match.
993 """Get stat data about the files explicitly specified by match.
991
994
992 Return a triple (results, dirsfound, dirsnotfound).
995 Return a triple (results, dirsfound, dirsnotfound).
993 - results is a mapping from filename to stat result. It also contains
996 - results is a mapping from filename to stat result. It also contains
994 listings mapping subrepos and .hg to None.
997 listings mapping subrepos and .hg to None.
995 - dirsfound is a list of files found to be directories.
998 - dirsfound is a list of files found to be directories.
996 - dirsnotfound is a list of files that the dirstate thinks are
999 - dirsnotfound is a list of files that the dirstate thinks are
997 directories and that were not found."""
1000 directories and that were not found."""
998
1001
999 def badtype(mode):
1002 def badtype(mode):
1000 kind = _(b'unknown')
1003 kind = _(b'unknown')
1001 if stat.S_ISCHR(mode):
1004 if stat.S_ISCHR(mode):
1002 kind = _(b'character device')
1005 kind = _(b'character device')
1003 elif stat.S_ISBLK(mode):
1006 elif stat.S_ISBLK(mode):
1004 kind = _(b'block device')
1007 kind = _(b'block device')
1005 elif stat.S_ISFIFO(mode):
1008 elif stat.S_ISFIFO(mode):
1006 kind = _(b'fifo')
1009 kind = _(b'fifo')
1007 elif stat.S_ISSOCK(mode):
1010 elif stat.S_ISSOCK(mode):
1008 kind = _(b'socket')
1011 kind = _(b'socket')
1009 elif stat.S_ISDIR(mode):
1012 elif stat.S_ISDIR(mode):
1010 kind = _(b'directory')
1013 kind = _(b'directory')
1011 return _(b'unsupported file type (type is %s)') % kind
1014 return _(b'unsupported file type (type is %s)') % kind
1012
1015
1013 badfn = match.bad
1016 badfn = match.bad
1014 dmap = self._map
1017 dmap = self._map
1015 lstat = os.lstat
1018 lstat = os.lstat
1016 getkind = stat.S_IFMT
1019 getkind = stat.S_IFMT
1017 dirkind = stat.S_IFDIR
1020 dirkind = stat.S_IFDIR
1018 regkind = stat.S_IFREG
1021 regkind = stat.S_IFREG
1019 lnkkind = stat.S_IFLNK
1022 lnkkind = stat.S_IFLNK
1020 join = self._join
1023 join = self._join
1021 dirsfound = []
1024 dirsfound = []
1022 foundadd = dirsfound.append
1025 foundadd = dirsfound.append
1023 dirsnotfound = []
1026 dirsnotfound = []
1024 notfoundadd = dirsnotfound.append
1027 notfoundadd = dirsnotfound.append
1025
1028
1026 if not match.isexact() and self._checkcase:
1029 if not match.isexact() and self._checkcase:
1027 normalize = self._normalize
1030 normalize = self._normalize
1028 else:
1031 else:
1029 normalize = None
1032 normalize = None
1030
1033
1031 files = sorted(match.files())
1034 files = sorted(match.files())
1032 subrepos.sort()
1035 subrepos.sort()
1033 i, j = 0, 0
1036 i, j = 0, 0
1034 while i < len(files) and j < len(subrepos):
1037 while i < len(files) and j < len(subrepos):
1035 subpath = subrepos[j] + b"/"
1038 subpath = subrepos[j] + b"/"
1036 if files[i] < subpath:
1039 if files[i] < subpath:
1037 i += 1
1040 i += 1
1038 continue
1041 continue
1039 while i < len(files) and files[i].startswith(subpath):
1042 while i < len(files) and files[i].startswith(subpath):
1040 del files[i]
1043 del files[i]
1041 j += 1
1044 j += 1
1042
1045
1043 if not files or b'' in files:
1046 if not files or b'' in files:
1044 files = [b'']
1047 files = [b'']
1045 # constructing the foldmap is expensive, so don't do it for the
1048 # constructing the foldmap is expensive, so don't do it for the
1046 # common case where files is ['']
1049 # common case where files is ['']
1047 normalize = None
1050 normalize = None
1048 results = dict.fromkeys(subrepos)
1051 results = dict.fromkeys(subrepos)
1049 results[b'.hg'] = None
1052 results[b'.hg'] = None
1050
1053
1051 for ff in files:
1054 for ff in files:
1052 if normalize:
1055 if normalize:
1053 nf = normalize(ff, False, True)
1056 nf = normalize(ff, False, True)
1054 else:
1057 else:
1055 nf = ff
1058 nf = ff
1056 if nf in results:
1059 if nf in results:
1057 continue
1060 continue
1058
1061
1059 try:
1062 try:
1060 st = lstat(join(nf))
1063 st = lstat(join(nf))
1061 kind = getkind(st.st_mode)
1064 kind = getkind(st.st_mode)
1062 if kind == dirkind:
1065 if kind == dirkind:
1063 if nf in dmap:
1066 if nf in dmap:
1064 # file replaced by dir on disk but still in dirstate
1067 # file replaced by dir on disk but still in dirstate
1065 results[nf] = None
1068 results[nf] = None
1066 foundadd((nf, ff))
1069 foundadd((nf, ff))
1067 elif kind == regkind or kind == lnkkind:
1070 elif kind == regkind or kind == lnkkind:
1068 results[nf] = st
1071 results[nf] = st
1069 else:
1072 else:
1070 badfn(ff, badtype(kind))
1073 badfn(ff, badtype(kind))
1071 if nf in dmap:
1074 if nf in dmap:
1072 results[nf] = None
1075 results[nf] = None
1073 except OSError as inst: # nf not found on disk - it is dirstate only
1076 except OSError as inst: # nf not found on disk - it is dirstate only
1074 if nf in dmap: # does it exactly match a missing file?
1077 if nf in dmap: # does it exactly match a missing file?
1075 results[nf] = None
1078 results[nf] = None
1076 else: # does it match a missing directory?
1079 else: # does it match a missing directory?
1077 if self._map.hasdir(nf):
1080 if self._map.hasdir(nf):
1078 notfoundadd(nf)
1081 notfoundadd(nf)
1079 else:
1082 else:
1080 badfn(ff, encoding.strtolocal(inst.strerror))
1083 badfn(ff, encoding.strtolocal(inst.strerror))
1081
1084
1082 # match.files() may contain explicitly-specified paths that shouldn't
1085 # match.files() may contain explicitly-specified paths that shouldn't
1083 # be taken; drop them from the list of files found. dirsfound/notfound
1086 # be taken; drop them from the list of files found. dirsfound/notfound
1084 # aren't filtered here because they will be tested later.
1087 # aren't filtered here because they will be tested later.
1085 if match.anypats():
1088 if match.anypats():
1086 for f in list(results):
1089 for f in list(results):
1087 if f == b'.hg' or f in subrepos:
1090 if f == b'.hg' or f in subrepos:
1088 # keep sentinel to disable further out-of-repo walks
1091 # keep sentinel to disable further out-of-repo walks
1089 continue
1092 continue
1090 if not match(f):
1093 if not match(f):
1091 del results[f]
1094 del results[f]
1092
1095
1093 # Case insensitive filesystems cannot rely on lstat() failing to detect
1096 # Case insensitive filesystems cannot rely on lstat() failing to detect
1094 # a case-only rename. Prune the stat object for any file that does not
1097 # a case-only rename. Prune the stat object for any file that does not
1095 # match the case in the filesystem, if there are multiple files that
1098 # match the case in the filesystem, if there are multiple files that
1096 # normalize to the same path.
1099 # normalize to the same path.
1097 if match.isexact() and self._checkcase:
1100 if match.isexact() and self._checkcase:
1098 normed = {}
1101 normed = {}
1099
1102
1100 for f, st in pycompat.iteritems(results):
1103 for f, st in pycompat.iteritems(results):
1101 if st is None:
1104 if st is None:
1102 continue
1105 continue
1103
1106
1104 nc = util.normcase(f)
1107 nc = util.normcase(f)
1105 paths = normed.get(nc)
1108 paths = normed.get(nc)
1106
1109
1107 if paths is None:
1110 if paths is None:
1108 paths = set()
1111 paths = set()
1109 normed[nc] = paths
1112 normed[nc] = paths
1110
1113
1111 paths.add(f)
1114 paths.add(f)
1112
1115
1113 for norm, paths in pycompat.iteritems(normed):
1116 for norm, paths in pycompat.iteritems(normed):
1114 if len(paths) > 1:
1117 if len(paths) > 1:
1115 for path in paths:
1118 for path in paths:
1116 folded = self._discoverpath(
1119 folded = self._discoverpath(
1117 path, norm, True, None, self._map.dirfoldmap
1120 path, norm, True, None, self._map.dirfoldmap
1118 )
1121 )
1119 if path != folded:
1122 if path != folded:
1120 results[path] = None
1123 results[path] = None
1121
1124
1122 return results, dirsfound, dirsnotfound
1125 return results, dirsfound, dirsnotfound
1123
1126
1124 def walk(self, match, subrepos, unknown, ignored, full=True):
1127 def walk(self, match, subrepos, unknown, ignored, full=True):
1125 """
1128 """
1126 Walk recursively through the directory tree, finding all files
1129 Walk recursively through the directory tree, finding all files
1127 matched by match.
1130 matched by match.
1128
1131
1129 If full is False, maybe skip some known-clean files.
1132 If full is False, maybe skip some known-clean files.
1130
1133
1131 Return a dict mapping filename to stat-like object (either
1134 Return a dict mapping filename to stat-like object (either
1132 mercurial.osutil.stat instance or return value of os.stat()).
1135 mercurial.osutil.stat instance or return value of os.stat()).
1133
1136
1134 """
1137 """
1135 # full is a flag that extensions that hook into walk can use -- this
1138 # full is a flag that extensions that hook into walk can use -- this
1136 # implementation doesn't use it at all. This satisfies the contract
1139 # implementation doesn't use it at all. This satisfies the contract
1137 # because we only guarantee a "maybe".
1140 # because we only guarantee a "maybe".
1138
1141
1139 if ignored:
1142 if ignored:
1140 ignore = util.never
1143 ignore = util.never
1141 dirignore = util.never
1144 dirignore = util.never
1142 elif unknown:
1145 elif unknown:
1143 ignore = self._ignore
1146 ignore = self._ignore
1144 dirignore = self._dirignore
1147 dirignore = self._dirignore
1145 else:
1148 else:
1146 # if not unknown and not ignored, drop dir recursion and step 2
1149 # if not unknown and not ignored, drop dir recursion and step 2
1147 ignore = util.always
1150 ignore = util.always
1148 dirignore = util.always
1151 dirignore = util.always
1149
1152
1150 matchfn = match.matchfn
1153 matchfn = match.matchfn
1151 matchalways = match.always()
1154 matchalways = match.always()
1152 matchtdir = match.traversedir
1155 matchtdir = match.traversedir
1153 dmap = self._map
1156 dmap = self._map
1154 listdir = util.listdir
1157 listdir = util.listdir
1155 lstat = os.lstat
1158 lstat = os.lstat
1156 dirkind = stat.S_IFDIR
1159 dirkind = stat.S_IFDIR
1157 regkind = stat.S_IFREG
1160 regkind = stat.S_IFREG
1158 lnkkind = stat.S_IFLNK
1161 lnkkind = stat.S_IFLNK
1159 join = self._join
1162 join = self._join
1160
1163
1161 exact = skipstep3 = False
1164 exact = skipstep3 = False
1162 if match.isexact(): # match.exact
1165 if match.isexact(): # match.exact
1163 exact = True
1166 exact = True
1164 dirignore = util.always # skip step 2
1167 dirignore = util.always # skip step 2
1165 elif match.prefix(): # match.match, no patterns
1168 elif match.prefix(): # match.match, no patterns
1166 skipstep3 = True
1169 skipstep3 = True
1167
1170
1168 if not exact and self._checkcase:
1171 if not exact and self._checkcase:
1169 normalize = self._normalize
1172 normalize = self._normalize
1170 normalizefile = self._normalizefile
1173 normalizefile = self._normalizefile
1171 skipstep3 = False
1174 skipstep3 = False
1172 else:
1175 else:
1173 normalize = self._normalize
1176 normalize = self._normalize
1174 normalizefile = None
1177 normalizefile = None
1175
1178
1176 # step 1: find all explicit files
1179 # step 1: find all explicit files
1177 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1180 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1178 if matchtdir:
1181 if matchtdir:
1179 for d in work:
1182 for d in work:
1180 matchtdir(d[0])
1183 matchtdir(d[0])
1181 for d in dirsnotfound:
1184 for d in dirsnotfound:
1182 matchtdir(d)
1185 matchtdir(d)
1183
1186
1184 skipstep3 = skipstep3 and not (work or dirsnotfound)
1187 skipstep3 = skipstep3 and not (work or dirsnotfound)
1185 work = [d for d in work if not dirignore(d[0])]
1188 work = [d for d in work if not dirignore(d[0])]
1186
1189
1187 # step 2: visit subdirectories
1190 # step 2: visit subdirectories
1188 def traverse(work, alreadynormed):
1191 def traverse(work, alreadynormed):
1189 wadd = work.append
1192 wadd = work.append
1190 while work:
1193 while work:
1191 tracing.counter('dirstate.walk work', len(work))
1194 tracing.counter('dirstate.walk work', len(work))
1192 nd = work.pop()
1195 nd = work.pop()
1193 visitentries = match.visitchildrenset(nd)
1196 visitentries = match.visitchildrenset(nd)
1194 if not visitentries:
1197 if not visitentries:
1195 continue
1198 continue
1196 if visitentries == b'this' or visitentries == b'all':
1199 if visitentries == b'this' or visitentries == b'all':
1197 visitentries = None
1200 visitentries = None
1198 skip = None
1201 skip = None
1199 if nd != b'':
1202 if nd != b'':
1200 skip = b'.hg'
1203 skip = b'.hg'
1201 try:
1204 try:
1202 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1205 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1203 entries = listdir(join(nd), stat=True, skip=skip)
1206 entries = listdir(join(nd), stat=True, skip=skip)
1204 except OSError as inst:
1207 except OSError as inst:
1205 if inst.errno in (errno.EACCES, errno.ENOENT):
1208 if inst.errno in (errno.EACCES, errno.ENOENT):
1206 match.bad(
1209 match.bad(
1207 self.pathto(nd), encoding.strtolocal(inst.strerror)
1210 self.pathto(nd), encoding.strtolocal(inst.strerror)
1208 )
1211 )
1209 continue
1212 continue
1210 raise
1213 raise
1211 for f, kind, st in entries:
1214 for f, kind, st in entries:
1212 # Some matchers may return files in the visitentries set,
1215 # Some matchers may return files in the visitentries set,
1213 # instead of 'this', if the matcher explicitly mentions them
1216 # instead of 'this', if the matcher explicitly mentions them
1214 # and is not an exactmatcher. This is acceptable; we do not
1217 # and is not an exactmatcher. This is acceptable; we do not
1215 # make any hard assumptions about file-or-directory below
1218 # make any hard assumptions about file-or-directory below
1216 # based on the presence of `f` in visitentries. If
1219 # based on the presence of `f` in visitentries. If
1217 # visitchildrenset returned a set, we can always skip the
1220 # visitchildrenset returned a set, we can always skip the
1218 # entries *not* in the set it provided regardless of whether
1221 # entries *not* in the set it provided regardless of whether
1219 # they're actually a file or a directory.
1222 # they're actually a file or a directory.
1220 if visitentries and f not in visitentries:
1223 if visitentries and f not in visitentries:
1221 continue
1224 continue
1222 if normalizefile:
1225 if normalizefile:
1223 # even though f might be a directory, we're only
1226 # even though f might be a directory, we're only
1224 # interested in comparing it to files currently in the
1227 # interested in comparing it to files currently in the
1225 # dmap -- therefore normalizefile is enough
1228 # dmap -- therefore normalizefile is enough
1226 nf = normalizefile(
1229 nf = normalizefile(
1227 nd and (nd + b"/" + f) or f, True, True
1230 nd and (nd + b"/" + f) or f, True, True
1228 )
1231 )
1229 else:
1232 else:
1230 nf = nd and (nd + b"/" + f) or f
1233 nf = nd and (nd + b"/" + f) or f
1231 if nf not in results:
1234 if nf not in results:
1232 if kind == dirkind:
1235 if kind == dirkind:
1233 if not ignore(nf):
1236 if not ignore(nf):
1234 if matchtdir:
1237 if matchtdir:
1235 matchtdir(nf)
1238 matchtdir(nf)
1236 wadd(nf)
1239 wadd(nf)
1237 if nf in dmap and (matchalways or matchfn(nf)):
1240 if nf in dmap and (matchalways or matchfn(nf)):
1238 results[nf] = None
1241 results[nf] = None
1239 elif kind == regkind or kind == lnkkind:
1242 elif kind == regkind or kind == lnkkind:
1240 if nf in dmap:
1243 if nf in dmap:
1241 if matchalways or matchfn(nf):
1244 if matchalways or matchfn(nf):
1242 results[nf] = st
1245 results[nf] = st
1243 elif (matchalways or matchfn(nf)) and not ignore(
1246 elif (matchalways or matchfn(nf)) and not ignore(
1244 nf
1247 nf
1245 ):
1248 ):
1246 # unknown file -- normalize if necessary
1249 # unknown file -- normalize if necessary
1247 if not alreadynormed:
1250 if not alreadynormed:
1248 nf = normalize(nf, False, True)
1251 nf = normalize(nf, False, True)
1249 results[nf] = st
1252 results[nf] = st
1250 elif nf in dmap and (matchalways or matchfn(nf)):
1253 elif nf in dmap and (matchalways or matchfn(nf)):
1251 results[nf] = None
1254 results[nf] = None
1252
1255
1253 for nd, d in work:
1256 for nd, d in work:
1254 # alreadynormed means that processwork doesn't have to do any
1257 # alreadynormed means that processwork doesn't have to do any
1255 # expensive directory normalization
1258 # expensive directory normalization
1256 alreadynormed = not normalize or nd == d
1259 alreadynormed = not normalize or nd == d
1257 traverse([d], alreadynormed)
1260 traverse([d], alreadynormed)
1258
1261
1259 for s in subrepos:
1262 for s in subrepos:
1260 del results[s]
1263 del results[s]
1261 del results[b'.hg']
1264 del results[b'.hg']
1262
1265
1263 # step 3: visit remaining files from dmap
1266 # step 3: visit remaining files from dmap
1264 if not skipstep3 and not exact:
1267 if not skipstep3 and not exact:
1265 # If a dmap file is not in results yet, it was either
1268 # If a dmap file is not in results yet, it was either
1266 # a) not matching matchfn b) ignored, c) missing, or d) under a
1269 # a) not matching matchfn b) ignored, c) missing, or d) under a
1267 # symlink directory.
1270 # symlink directory.
1268 if not results and matchalways:
1271 if not results and matchalways:
1269 visit = [f for f in dmap]
1272 visit = [f for f in dmap]
1270 else:
1273 else:
1271 visit = [f for f in dmap if f not in results and matchfn(f)]
1274 visit = [f for f in dmap if f not in results and matchfn(f)]
1272 visit.sort()
1275 visit.sort()
1273
1276
1274 if unknown:
1277 if unknown:
1275 # unknown == True means we walked all dirs under the roots
1278 # unknown == True means we walked all dirs under the roots
1276 # that wasn't ignored, and everything that matched was stat'ed
1279 # that wasn't ignored, and everything that matched was stat'ed
1277 # and is already in results.
1280 # and is already in results.
1278 # The rest must thus be ignored or under a symlink.
1281 # The rest must thus be ignored or under a symlink.
1279 audit_path = pathutil.pathauditor(self._root, cached=True)
1282 audit_path = pathutil.pathauditor(self._root, cached=True)
1280
1283
1281 for nf in iter(visit):
1284 for nf in iter(visit):
1282 # If a stat for the same file was already added with a
1285 # If a stat for the same file was already added with a
1283 # different case, don't add one for this, since that would
1286 # different case, don't add one for this, since that would
1284 # make it appear as if the file exists under both names
1287 # make it appear as if the file exists under both names
1285 # on disk.
1288 # on disk.
1286 if (
1289 if (
1287 normalizefile
1290 normalizefile
1288 and normalizefile(nf, True, True) in results
1291 and normalizefile(nf, True, True) in results
1289 ):
1292 ):
1290 results[nf] = None
1293 results[nf] = None
1291 # Report ignored items in the dmap as long as they are not
1294 # Report ignored items in the dmap as long as they are not
1292 # under a symlink directory.
1295 # under a symlink directory.
1293 elif audit_path.check(nf):
1296 elif audit_path.check(nf):
1294 try:
1297 try:
1295 results[nf] = lstat(join(nf))
1298 results[nf] = lstat(join(nf))
1296 # file was just ignored, no links, and exists
1299 # file was just ignored, no links, and exists
1297 except OSError:
1300 except OSError:
1298 # file doesn't exist
1301 # file doesn't exist
1299 results[nf] = None
1302 results[nf] = None
1300 else:
1303 else:
1301 # It's either missing or under a symlink directory
1304 # It's either missing or under a symlink directory
1302 # which we in this case report as missing
1305 # which we in this case report as missing
1303 results[nf] = None
1306 results[nf] = None
1304 else:
1307 else:
1305 # We may not have walked the full directory tree above,
1308 # We may not have walked the full directory tree above,
1306 # so stat and check everything we missed.
1309 # so stat and check everything we missed.
1307 iv = iter(visit)
1310 iv = iter(visit)
1308 for st in util.statfiles([join(i) for i in visit]):
1311 for st in util.statfiles([join(i) for i in visit]):
1309 results[next(iv)] = st
1312 results[next(iv)] = st
1310 return results
1313 return results
1311
1314
1312 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1315 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1313 # Force Rayon (Rust parallelism library) to respect the number of
1316 # Force Rayon (Rust parallelism library) to respect the number of
1314 # workers. This is a temporary workaround until Rust code knows
1317 # workers. This is a temporary workaround until Rust code knows
1315 # how to read the config file.
1318 # how to read the config file.
1316 numcpus = self._ui.configint(b"worker", b"numcpus")
1319 numcpus = self._ui.configint(b"worker", b"numcpus")
1317 if numcpus is not None:
1320 if numcpus is not None:
1318 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1321 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1319
1322
1320 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1323 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1321 if not workers_enabled:
1324 if not workers_enabled:
1322 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1325 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1323
1326
1324 (
1327 (
1325 lookup,
1328 lookup,
1326 modified,
1329 modified,
1327 added,
1330 added,
1328 removed,
1331 removed,
1329 deleted,
1332 deleted,
1330 clean,
1333 clean,
1331 ignored,
1334 ignored,
1332 unknown,
1335 unknown,
1333 warnings,
1336 warnings,
1334 bad,
1337 bad,
1335 traversed,
1338 traversed,
1336 dirty,
1339 dirty,
1337 ) = rustmod.status(
1340 ) = rustmod.status(
1338 self._map._rustmap,
1341 self._map._rustmap,
1339 matcher,
1342 matcher,
1340 self._rootdir,
1343 self._rootdir,
1341 self._ignorefiles(),
1344 self._ignorefiles(),
1342 self._checkexec,
1345 self._checkexec,
1343 self._lastnormaltime,
1346 self._lastnormaltime,
1344 bool(list_clean),
1347 bool(list_clean),
1345 bool(list_ignored),
1348 bool(list_ignored),
1346 bool(list_unknown),
1349 bool(list_unknown),
1347 bool(matcher.traversedir),
1350 bool(matcher.traversedir),
1348 )
1351 )
1349
1352
1350 self._dirty |= dirty
1353 self._dirty |= dirty
1351
1354
1352 if matcher.traversedir:
1355 if matcher.traversedir:
1353 for dir in traversed:
1356 for dir in traversed:
1354 matcher.traversedir(dir)
1357 matcher.traversedir(dir)
1355
1358
1356 if self._ui.warn:
1359 if self._ui.warn:
1357 for item in warnings:
1360 for item in warnings:
1358 if isinstance(item, tuple):
1361 if isinstance(item, tuple):
1359 file_path, syntax = item
1362 file_path, syntax = item
1360 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1363 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1361 file_path,
1364 file_path,
1362 syntax,
1365 syntax,
1363 )
1366 )
1364 self._ui.warn(msg)
1367 self._ui.warn(msg)
1365 else:
1368 else:
1366 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1369 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1367 self._ui.warn(
1370 self._ui.warn(
1368 msg
1371 msg
1369 % (
1372 % (
1370 pathutil.canonpath(
1373 pathutil.canonpath(
1371 self._rootdir, self._rootdir, item
1374 self._rootdir, self._rootdir, item
1372 ),
1375 ),
1373 b"No such file or directory",
1376 b"No such file or directory",
1374 )
1377 )
1375 )
1378 )
1376
1379
1377 for (fn, message) in bad:
1380 for (fn, message) in bad:
1378 matcher.bad(fn, encoding.strtolocal(message))
1381 matcher.bad(fn, encoding.strtolocal(message))
1379
1382
1380 status = scmutil.status(
1383 status = scmutil.status(
1381 modified=modified,
1384 modified=modified,
1382 added=added,
1385 added=added,
1383 removed=removed,
1386 removed=removed,
1384 deleted=deleted,
1387 deleted=deleted,
1385 unknown=unknown,
1388 unknown=unknown,
1386 ignored=ignored,
1389 ignored=ignored,
1387 clean=clean,
1390 clean=clean,
1388 )
1391 )
1389 return (lookup, status)
1392 return (lookup, status)
1390
1393
1391 def status(self, match, subrepos, ignored, clean, unknown):
1394 def status(self, match, subrepos, ignored, clean, unknown):
1392 """Determine the status of the working copy relative to the
1395 """Determine the status of the working copy relative to the
1393 dirstate and return a pair of (unsure, status), where status is of type
1396 dirstate and return a pair of (unsure, status), where status is of type
1394 scmutil.status and:
1397 scmutil.status and:
1395
1398
1396 unsure:
1399 unsure:
1397 files that might have been modified since the dirstate was
1400 files that might have been modified since the dirstate was
1398 written, but need to be read to be sure (size is the same
1401 written, but need to be read to be sure (size is the same
1399 but mtime differs)
1402 but mtime differs)
1400 status.modified:
1403 status.modified:
1401 files that have definitely been modified since the dirstate
1404 files that have definitely been modified since the dirstate
1402 was written (different size or mode)
1405 was written (different size or mode)
1403 status.clean:
1406 status.clean:
1404 files that have definitely not been modified since the
1407 files that have definitely not been modified since the
1405 dirstate was written
1408 dirstate was written
1406 """
1409 """
1407 listignored, listclean, listunknown = ignored, clean, unknown
1410 listignored, listclean, listunknown = ignored, clean, unknown
1408 lookup, modified, added, unknown, ignored = [], [], [], [], []
1411 lookup, modified, added, unknown, ignored = [], [], [], [], []
1409 removed, deleted, clean = [], [], []
1412 removed, deleted, clean = [], [], []
1410
1413
1411 dmap = self._map
1414 dmap = self._map
1412 dmap.preload()
1415 dmap.preload()
1413
1416
1414 use_rust = True
1417 use_rust = True
1415
1418
1416 allowed_matchers = (
1419 allowed_matchers = (
1417 matchmod.alwaysmatcher,
1420 matchmod.alwaysmatcher,
1418 matchmod.exactmatcher,
1421 matchmod.exactmatcher,
1419 matchmod.includematcher,
1422 matchmod.includematcher,
1420 )
1423 )
1421
1424
1422 if rustmod is None:
1425 if rustmod is None:
1423 use_rust = False
1426 use_rust = False
1424 elif self._checkcase:
1427 elif self._checkcase:
1425 # Case-insensitive filesystems are not handled yet
1428 # Case-insensitive filesystems are not handled yet
1426 use_rust = False
1429 use_rust = False
1427 elif subrepos:
1430 elif subrepos:
1428 use_rust = False
1431 use_rust = False
1429 elif sparse.enabled:
1432 elif sparse.enabled:
1430 use_rust = False
1433 use_rust = False
1431 elif not isinstance(match, allowed_matchers):
1434 elif not isinstance(match, allowed_matchers):
1432 # Some matchers have yet to be implemented
1435 # Some matchers have yet to be implemented
1433 use_rust = False
1436 use_rust = False
1434
1437
1435 if use_rust:
1438 if use_rust:
1436 try:
1439 try:
1437 return self._rust_status(
1440 return self._rust_status(
1438 match, listclean, listignored, listunknown
1441 match, listclean, listignored, listunknown
1439 )
1442 )
1440 except rustmod.FallbackError:
1443 except rustmod.FallbackError:
1441 pass
1444 pass
1442
1445
1443 def noop(f):
1446 def noop(f):
1444 pass
1447 pass
1445
1448
1446 dcontains = dmap.__contains__
1449 dcontains = dmap.__contains__
1447 dget = dmap.__getitem__
1450 dget = dmap.__getitem__
1448 ladd = lookup.append # aka "unsure"
1451 ladd = lookup.append # aka "unsure"
1449 madd = modified.append
1452 madd = modified.append
1450 aadd = added.append
1453 aadd = added.append
1451 uadd = unknown.append if listunknown else noop
1454 uadd = unknown.append if listunknown else noop
1452 iadd = ignored.append if listignored else noop
1455 iadd = ignored.append if listignored else noop
1453 radd = removed.append
1456 radd = removed.append
1454 dadd = deleted.append
1457 dadd = deleted.append
1455 cadd = clean.append if listclean else noop
1458 cadd = clean.append if listclean else noop
1456 mexact = match.exact
1459 mexact = match.exact
1457 dirignore = self._dirignore
1460 dirignore = self._dirignore
1458 checkexec = self._checkexec
1461 checkexec = self._checkexec
1459 copymap = self._map.copymap
1462 copymap = self._map.copymap
1460 lastnormaltime = self._lastnormaltime
1463 lastnormaltime = self._lastnormaltime
1461
1464
1462 # We need to do full walks when either
1465 # We need to do full walks when either
1463 # - we're listing all clean files, or
1466 # - we're listing all clean files, or
1464 # - match.traversedir does something, because match.traversedir should
1467 # - match.traversedir does something, because match.traversedir should
1465 # be called for every dir in the working dir
1468 # be called for every dir in the working dir
1466 full = listclean or match.traversedir is not None
1469 full = listclean or match.traversedir is not None
1467 for fn, st in pycompat.iteritems(
1470 for fn, st in pycompat.iteritems(
1468 self.walk(match, subrepos, listunknown, listignored, full=full)
1471 self.walk(match, subrepos, listunknown, listignored, full=full)
1469 ):
1472 ):
1470 if not dcontains(fn):
1473 if not dcontains(fn):
1471 if (listignored or mexact(fn)) and dirignore(fn):
1474 if (listignored or mexact(fn)) and dirignore(fn):
1472 if listignored:
1475 if listignored:
1473 iadd(fn)
1476 iadd(fn)
1474 else:
1477 else:
1475 uadd(fn)
1478 uadd(fn)
1476 continue
1479 continue
1477
1480
1478 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1481 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1479 # written like that for performance reasons. dmap[fn] is not a
1482 # written like that for performance reasons. dmap[fn] is not a
1480 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1483 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1481 # opcode has fast paths when the value to be unpacked is a tuple or
1484 # opcode has fast paths when the value to be unpacked is a tuple or
1482 # a list, but falls back to creating a full-fledged iterator in
1485 # a list, but falls back to creating a full-fledged iterator in
1483 # general. That is much slower than simply accessing and storing the
1486 # general. That is much slower than simply accessing and storing the
1484 # tuple members one by one.
1487 # tuple members one by one.
1485 t = dget(fn)
1488 t = dget(fn)
1486 mode = t.mode
1489 mode = t.mode
1487 size = t.size
1490 size = t.size
1488 time = t.mtime
1491 time = t.mtime
1489
1492
1490 if not st and t.tracked:
1493 if not st and t.tracked:
1491 dadd(fn)
1494 dadd(fn)
1492 elif t.merged:
1495 elif t.merged:
1493 madd(fn)
1496 madd(fn)
1494 elif t.added:
1497 elif t.added:
1495 aadd(fn)
1498 aadd(fn)
1496 elif t.removed:
1499 elif t.removed:
1497 radd(fn)
1500 radd(fn)
1498 elif t.tracked:
1501 elif t.tracked:
1499 if (
1502 if (
1500 size >= 0
1503 size >= 0
1501 and (
1504 and (
1502 (size != st.st_size and size != st.st_size & _rangemask)
1505 (size != st.st_size and size != st.st_size & _rangemask)
1503 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1506 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1504 )
1507 )
1505 or t.from_p2
1508 or t.from_p2
1506 or fn in copymap
1509 or fn in copymap
1507 ):
1510 ):
1508 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1511 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1509 # issue6456: Size returned may be longer due to
1512 # issue6456: Size returned may be longer due to
1510 # encryption on EXT-4 fscrypt, undecided.
1513 # encryption on EXT-4 fscrypt, undecided.
1511 ladd(fn)
1514 ladd(fn)
1512 else:
1515 else:
1513 madd(fn)
1516 madd(fn)
1514 elif (
1517 elif (
1515 time != st[stat.ST_MTIME]
1518 time != st[stat.ST_MTIME]
1516 and time != st[stat.ST_MTIME] & _rangemask
1519 and time != st[stat.ST_MTIME] & _rangemask
1517 ):
1520 ):
1518 ladd(fn)
1521 ladd(fn)
1519 elif st[stat.ST_MTIME] == lastnormaltime:
1522 elif st[stat.ST_MTIME] == lastnormaltime:
1520 # fn may have just been marked as normal and it may have
1523 # fn may have just been marked as normal and it may have
1521 # changed in the same second without changing its size.
1524 # changed in the same second without changing its size.
1522 # This can happen if we quickly do multiple commits.
1525 # This can happen if we quickly do multiple commits.
1523 # Force lookup, so we don't miss such a racy file change.
1526 # Force lookup, so we don't miss such a racy file change.
1524 ladd(fn)
1527 ladd(fn)
1525 elif listclean:
1528 elif listclean:
1526 cadd(fn)
1529 cadd(fn)
1527 status = scmutil.status(
1530 status = scmutil.status(
1528 modified, added, removed, deleted, unknown, ignored, clean
1531 modified, added, removed, deleted, unknown, ignored, clean
1529 )
1532 )
1530 return (lookup, status)
1533 return (lookup, status)
1531
1534
1532 def matches(self, match):
1535 def matches(self, match):
1533 """
1536 """
1534 return files in the dirstate (in whatever state) filtered by match
1537 return files in the dirstate (in whatever state) filtered by match
1535 """
1538 """
1536 dmap = self._map
1539 dmap = self._map
1537 if rustmod is not None:
1540 if rustmod is not None:
1538 dmap = self._map._rustmap
1541 dmap = self._map._rustmap
1539
1542
1540 if match.always():
1543 if match.always():
1541 return dmap.keys()
1544 return dmap.keys()
1542 files = match.files()
1545 files = match.files()
1543 if match.isexact():
1546 if match.isexact():
1544 # fast path -- filter the other way around, since typically files is
1547 # fast path -- filter the other way around, since typically files is
1545 # much smaller than dmap
1548 # much smaller than dmap
1546 return [f for f in files if f in dmap]
1549 return [f for f in files if f in dmap]
1547 if match.prefix() and all(fn in dmap for fn in files):
1550 if match.prefix() and all(fn in dmap for fn in files):
1548 # fast path -- all the values are known to be files, so just return
1551 # fast path -- all the values are known to be files, so just return
1549 # that
1552 # that
1550 return list(files)
1553 return list(files)
1551 return [f for f in dmap if match(f)]
1554 return [f for f in dmap if match(f)]
1552
1555
1553 def _actualfilename(self, tr):
1556 def _actualfilename(self, tr):
1554 if tr:
1557 if tr:
1555 return self._pendingfilename
1558 return self._pendingfilename
1556 else:
1559 else:
1557 return self._filename
1560 return self._filename
1558
1561
1559 def savebackup(self, tr, backupname):
1562 def savebackup(self, tr, backupname):
1560 '''Save current dirstate into backup file'''
1563 '''Save current dirstate into backup file'''
1561 filename = self._actualfilename(tr)
1564 filename = self._actualfilename(tr)
1562 assert backupname != filename
1565 assert backupname != filename
1563
1566
1564 # use '_writedirstate' instead of 'write' to write changes certainly,
1567 # use '_writedirstate' instead of 'write' to write changes certainly,
1565 # because the latter omits writing out if transaction is running.
1568 # because the latter omits writing out if transaction is running.
1566 # output file will be used to create backup of dirstate at this point.
1569 # output file will be used to create backup of dirstate at this point.
1567 if self._dirty or not self._opener.exists(filename):
1570 if self._dirty or not self._opener.exists(filename):
1568 self._writedirstate(
1571 self._writedirstate(
1569 tr,
1572 tr,
1570 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1573 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1571 )
1574 )
1572
1575
1573 if tr:
1576 if tr:
1574 # ensure that subsequent tr.writepending returns True for
1577 # ensure that subsequent tr.writepending returns True for
1575 # changes written out above, even if dirstate is never
1578 # changes written out above, even if dirstate is never
1576 # changed after this
1579 # changed after this
1577 tr.addfilegenerator(
1580 tr.addfilegenerator(
1578 b'dirstate',
1581 b'dirstate',
1579 (self._filename,),
1582 (self._filename,),
1580 lambda f: self._writedirstate(tr, f),
1583 lambda f: self._writedirstate(tr, f),
1581 location=b'plain',
1584 location=b'plain',
1582 )
1585 )
1583
1586
1584 # ensure that pending file written above is unlinked at
1587 # ensure that pending file written above is unlinked at
1585 # failure, even if tr.writepending isn't invoked until the
1588 # failure, even if tr.writepending isn't invoked until the
1586 # end of this transaction
1589 # end of this transaction
1587 tr.registertmp(filename, location=b'plain')
1590 tr.registertmp(filename, location=b'plain')
1588
1591
1589 self._opener.tryunlink(backupname)
1592 self._opener.tryunlink(backupname)
1590 # hardlink backup is okay because _writedirstate is always called
1593 # hardlink backup is okay because _writedirstate is always called
1591 # with an "atomictemp=True" file.
1594 # with an "atomictemp=True" file.
1592 util.copyfile(
1595 util.copyfile(
1593 self._opener.join(filename),
1596 self._opener.join(filename),
1594 self._opener.join(backupname),
1597 self._opener.join(backupname),
1595 hardlink=True,
1598 hardlink=True,
1596 )
1599 )
1597
1600
1598 def restorebackup(self, tr, backupname):
1601 def restorebackup(self, tr, backupname):
1599 '''Restore dirstate by backup file'''
1602 '''Restore dirstate by backup file'''
1600 # this "invalidate()" prevents "wlock.release()" from writing
1603 # this "invalidate()" prevents "wlock.release()" from writing
1601 # changes of dirstate out after restoring from backup file
1604 # changes of dirstate out after restoring from backup file
1602 self.invalidate()
1605 self.invalidate()
1603 filename = self._actualfilename(tr)
1606 filename = self._actualfilename(tr)
1604 o = self._opener
1607 o = self._opener
1605 if util.samefile(o.join(backupname), o.join(filename)):
1608 if util.samefile(o.join(backupname), o.join(filename)):
1606 o.unlink(backupname)
1609 o.unlink(backupname)
1607 else:
1610 else:
1608 o.rename(backupname, filename, checkambig=True)
1611 o.rename(backupname, filename, checkambig=True)
1609
1612
1610 def clearbackup(self, tr, backupname):
1613 def clearbackup(self, tr, backupname):
1611 '''Clear backup file'''
1614 '''Clear backup file'''
1612 self._opener.unlink(backupname)
1615 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now