##// END OF EJS Templates
dirstate: use `reset_state` in `update_file_p1`...
marmoute -
r48494:1c06ef8f default
parent child Browse files
Show More
@@ -1,1640 +1,1656 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
51 class repocache(filecache):
51 class repocache(filecache):
52 """filecache for files in .hg/"""
52 """filecache for files in .hg/"""
53
53
54 def join(self, obj, fname):
54 def join(self, obj, fname):
55 return obj._opener.join(fname)
55 return obj._opener.join(fname)
56
56
57
57
58 class rootcache(filecache):
58 class rootcache(filecache):
59 """filecache for files in the repository root"""
59 """filecache for files in the repository root"""
60
60
61 def join(self, obj, fname):
61 def join(self, obj, fname):
62 return obj._join(fname)
62 return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
75 def requires_parents_change(func):
75 def requires_parents_change(func):
76 def wrap(self, *args, **kwargs):
76 def wrap(self, *args, **kwargs):
77 if not self.pendingparentchange():
77 if not self.pendingparentchange():
78 msg = 'calling `%s` outside of a parentchange context'
78 msg = 'calling `%s` outside of a parentchange context'
79 msg %= func.__name__
79 msg %= func.__name__
80 raise error.ProgrammingError(msg)
80 raise error.ProgrammingError(msg)
81 return func(self, *args, **kwargs)
81 return func(self, *args, **kwargs)
82
82
83 return wrap
83 return wrap
84
84
85
85
86 def requires_no_parents_change(func):
86 def requires_no_parents_change(func):
87 def wrap(self, *args, **kwargs):
87 def wrap(self, *args, **kwargs):
88 if self.pendingparentchange():
88 if self.pendingparentchange():
89 msg = 'calling `%s` inside of a parentchange context'
89 msg = 'calling `%s` inside of a parentchange context'
90 msg %= func.__name__
90 msg %= func.__name__
91 raise error.ProgrammingError(msg)
91 raise error.ProgrammingError(msg)
92 return func(self, *args, **kwargs)
92 return func(self, *args, **kwargs)
93
93
94 return wrap
94 return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._updatedfiles = set()
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
242 if self._checklink and self._checkexec:
243
243
244 def f(x):
244 def f(x):
245 try:
245 try:
246 st = os.lstat(self._join(x))
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
247 if util.statislink(st):
248 return b'l'
248 return b'l'
249 if util.statisexec(st):
249 if util.statisexec(st):
250 return b'x'
250 return b'x'
251 except OSError:
251 except OSError:
252 pass
252 pass
253 return b''
253 return b''
254
254
255 return f
255 return f
256
256
257 fallback = buildfallback()
257 fallback = buildfallback()
258 if self._checklink:
258 if self._checklink:
259
259
260 def f(x):
260 def f(x):
261 if os.path.islink(self._join(x)):
261 if os.path.islink(self._join(x)):
262 return b'l'
262 return b'l'
263 if b'x' in fallback(x):
263 if b'x' in fallback(x):
264 return b'x'
264 return b'x'
265 return b''
265 return b''
266
266
267 return f
267 return f
268 if self._checkexec:
268 if self._checkexec:
269
269
270 def f(x):
270 def f(x):
271 if b'l' in fallback(x):
271 if b'l' in fallback(x):
272 return b'l'
272 return b'l'
273 if util.isexec(self._join(x)):
273 if util.isexec(self._join(x)):
274 return b'x'
274 return b'x'
275 return b''
275 return b''
276
276
277 return f
277 return f
278 else:
278 else:
279 return fallback
279 return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries a
370 When moving from two parents to one, "merged" entries a
371 adjusted to normal and previous copy records discarded and
371 adjusted to normal and previous copy records discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 self._map.setparents(p1, p2)
388 self._map.setparents(p1, p2)
389 copies = {}
389 copies = {}
390 if (
390 if (
391 oldp2 != self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
395
396 for f in candidatefiles:
396 for f in candidatefiles:
397 s = self._map.get(f)
397 s = self._map.get(f)
398 if s is None:
398 if s is None:
399 continue
399 continue
400
400
401 # Discard "merged" markers when moving away from a merge state
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
402 if s.merged:
403 source = self._map.copymap.get(f)
403 source = self._map.copymap.get(f)
404 if source:
404 if source:
405 copies[f] = source
405 copies[f] = source
406 self.normallookup(f)
406 self.normallookup(f)
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._add(f)
413 return copies
413 return copies
414
414
415 def setbranch(self, branch):
415 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 try:
418 try:
419 f.write(self._branch + b'\n')
419 f.write(self._branch + b'\n')
420 f.close()
420 f.close()
421
421
422 # make sure filecache has the correct stat info for _branch after
422 # make sure filecache has the correct stat info for _branch after
423 # replacing the underlying file
423 # replacing the underlying file
424 ce = self._filecache[b'_branch']
424 ce = self._filecache[b'_branch']
425 if ce:
425 if ce:
426 ce.refresh()
426 ce.refresh()
427 except: # re-raises
427 except: # re-raises
428 f.discard()
428 f.discard()
429 raise
429 raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of "update/merge" case. For
469 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True the file was previously untracked, False otherwise.
472 return True the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self.normallookup(filename)
479 self.normallookup(filename)
480 return True
480 return True
481 return False
481 return False
482
482
483 @requires_no_parents_change
483 @requires_no_parents_change
484 def set_untracked(self, filename):
484 def set_untracked(self, filename):
485 """a "public" method for generic code to mark a file as untracked
485 """a "public" method for generic code to mark a file as untracked
486
486
487 This function is to be called outside of "update/merge" case. For
487 This function is to be called outside of "update/merge" case. For
488 example by a command like `hg remove X`.
488 example by a command like `hg remove X`.
489
489
490 return True the file was previously tracked, False otherwise.
490 return True the file was previously tracked, False otherwise.
491 """
491 """
492 entry = self._map.get(filename)
492 entry = self._map.get(filename)
493 if entry is None:
493 if entry is None:
494 return False
494 return False
495 elif entry.added:
495 elif entry.added:
496 self._drop(filename)
496 self._drop(filename)
497 return True
497 return True
498 else:
498 else:
499 self._remove(filename)
499 self._remove(filename)
500 return True
500 return True
501
501
502 @requires_parents_change
502 @requires_parents_change
503 def update_file_p1(
503 def update_file_p1(
504 self,
504 self,
505 filename,
505 filename,
506 p1_tracked,
506 p1_tracked,
507 ):
507 ):
508 """Set a file as tracked in the parent (or not)
508 """Set a file as tracked in the parent (or not)
509
509
510 This is to be called when adjust the dirstate to a new parent after an history
510 This is to be called when adjust the dirstate to a new parent after an history
511 rewriting operation.
511 rewriting operation.
512
512
513 It should not be called during a merge (p2 != nullid) and only within
513 It should not be called during a merge (p2 != nullid) and only within
514 a `with dirstate.parentchange():` context.
514 a `with dirstate.parentchange():` context.
515 """
515 """
516 if self.in_merge:
516 if self.in_merge:
517 msg = b'update_file_reference should not be called when merging'
517 msg = b'update_file_reference should not be called when merging'
518 raise error.ProgrammingError(msg)
518 raise error.ProgrammingError(msg)
519 entry = self._map.get(filename)
519 entry = self._map.get(filename)
520 if entry is None:
520 if entry is None:
521 wc_tracked = False
521 wc_tracked = False
522 else:
522 else:
523 wc_tracked = entry.tracked
523 wc_tracked = entry.tracked
524 possibly_dirty = False
524 if p1_tracked and wc_tracked:
525 if p1_tracked and wc_tracked:
525 # the underlying reference might have changed, we will have to
526 # the underlying reference might have changed, we will have to
526 # check it.
527 # check it.
527 self.normallookup(filename)
528 possibly_dirty = True
528 elif not (p1_tracked or wc_tracked):
529 elif not (p1_tracked or wc_tracked):
529 # the file is no longer relevant to anyone
530 # the file is no longer relevant to anyone
530 self._drop(filename)
531 self._drop(filename)
531 elif (not p1_tracked) and wc_tracked:
532 elif (not p1_tracked) and wc_tracked:
532 if not entry.added:
533 if entry is not None and entry.added:
533 self._add(filename)
534 return # avoid dropping copy information (maybe?)
534 elif p1_tracked and not wc_tracked:
535 elif p1_tracked and not wc_tracked:
535 if entry is None or not entry.removed:
536 pass
536 self._remove(filename)
537 else:
537 else:
538 assert False, 'unreachable'
538 assert False, 'unreachable'
539
539
540 # this mean we are doing call for file we do not really care about the
541 # data (eg: added or removed), however this should be a minor overhead
542 # compared to the overall update process calling this.
543 parentfiledata = None
544 if wc_tracked:
545 parentfiledata = self._get_filedata(filename)
546
547 self._updatedfiles.add(filename)
548 self._map.reset_state(
549 filename,
550 wc_tracked,
551 p1_tracked,
552 possibly_dirty=possibly_dirty,
553 parentfiledata=parentfiledata,
554 )
555
540 @requires_parents_change
556 @requires_parents_change
541 def update_file(
557 def update_file(
542 self,
558 self,
543 filename,
559 filename,
544 wc_tracked,
560 wc_tracked,
545 p1_tracked,
561 p1_tracked,
546 p2_tracked=False,
562 p2_tracked=False,
547 merged=False,
563 merged=False,
548 clean_p1=False,
564 clean_p1=False,
549 clean_p2=False,
565 clean_p2=False,
550 possibly_dirty=False,
566 possibly_dirty=False,
551 parentfiledata=None,
567 parentfiledata=None,
552 ):
568 ):
553 """update the information about a file in the dirstate
569 """update the information about a file in the dirstate
554
570
555 This is to be called when the direstates parent changes to keep track
571 This is to be called when the direstates parent changes to keep track
556 of what is the file situation in regards to the working copy and its parent.
572 of what is the file situation in regards to the working copy and its parent.
557
573
558 This function must be called within a `dirstate.parentchange` context.
574 This function must be called within a `dirstate.parentchange` context.
559
575
560 note: the API is at an early stage and we might need to ajust it
576 note: the API is at an early stage and we might need to ajust it
561 depending of what information ends up being relevant and useful to
577 depending of what information ends up being relevant and useful to
562 other processing.
578 other processing.
563 """
579 """
564 if merged and (clean_p1 or clean_p2):
580 if merged and (clean_p1 or clean_p2):
565 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
581 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
566 raise error.ProgrammingError(msg)
582 raise error.ProgrammingError(msg)
567
583
568 # note: I do not think we need to double check name clash here since we
584 # note: I do not think we need to double check name clash here since we
569 # are in a update/merge case that should already have taken care of
585 # are in a update/merge case that should already have taken care of
570 # this. The test agrees
586 # this. The test agrees
571
587
572 self._dirty = True
588 self._dirty = True
573 self._updatedfiles.add(filename)
589 self._updatedfiles.add(filename)
574
590
575 need_parent_file_data = (
591 need_parent_file_data = (
576 not (possibly_dirty or clean_p2 or merged)
592 not (possibly_dirty or clean_p2 or merged)
577 and wc_tracked
593 and wc_tracked
578 and p1_tracked
594 and p1_tracked
579 )
595 )
580
596
581 # this mean we are doing call for file we do not really care about the
597 # this mean we are doing call for file we do not really care about the
582 # data (eg: added or removed), however this should be a minor overhead
598 # data (eg: added or removed), however this should be a minor overhead
583 # compared to the overall update process calling this.
599 # compared to the overall update process calling this.
584 if need_parent_file_data:
600 if need_parent_file_data:
585 if parentfiledata is None:
601 if parentfiledata is None:
586 parentfiledata = self._get_filedata(filename)
602 parentfiledata = self._get_filedata(filename)
587 mtime = parentfiledata[2]
603 mtime = parentfiledata[2]
588
604
589 if mtime > self._lastnormaltime:
605 if mtime > self._lastnormaltime:
590 # Remember the most recent modification timeslot for
606 # Remember the most recent modification timeslot for
591 # status(), to make sure we won't miss future
607 # status(), to make sure we won't miss future
592 # size-preserving file content modifications that happen
608 # size-preserving file content modifications that happen
593 # within the same timeslot.
609 # within the same timeslot.
594 self._lastnormaltime = mtime
610 self._lastnormaltime = mtime
595
611
596 self._map.reset_state(
612 self._map.reset_state(
597 filename,
613 filename,
598 wc_tracked,
614 wc_tracked,
599 p1_tracked,
615 p1_tracked,
600 p2_tracked=p2_tracked,
616 p2_tracked=p2_tracked,
601 merged=merged,
617 merged=merged,
602 clean_p1=clean_p1,
618 clean_p1=clean_p1,
603 clean_p2=clean_p2,
619 clean_p2=clean_p2,
604 possibly_dirty=possibly_dirty,
620 possibly_dirty=possibly_dirty,
605 parentfiledata=parentfiledata,
621 parentfiledata=parentfiledata,
606 )
622 )
607
623
608 def _addpath(
624 def _addpath(
609 self,
625 self,
610 f,
626 f,
611 mode=0,
627 mode=0,
612 size=None,
628 size=None,
613 mtime=None,
629 mtime=None,
614 added=False,
630 added=False,
615 merged=False,
631 merged=False,
616 from_p2=False,
632 from_p2=False,
617 possibly_dirty=False,
633 possibly_dirty=False,
618 ):
634 ):
619 entry = self._map.get(f)
635 entry = self._map.get(f)
620 if added or entry is not None and entry.removed:
636 if added or entry is not None and entry.removed:
621 scmutil.checkfilename(f)
637 scmutil.checkfilename(f)
622 if self._map.hastrackeddir(f):
638 if self._map.hastrackeddir(f):
623 msg = _(b'directory %r already in dirstate')
639 msg = _(b'directory %r already in dirstate')
624 msg %= pycompat.bytestr(f)
640 msg %= pycompat.bytestr(f)
625 raise error.Abort(msg)
641 raise error.Abort(msg)
626 # shadows
642 # shadows
627 for d in pathutil.finddirs(f):
643 for d in pathutil.finddirs(f):
628 if self._map.hastrackeddir(d):
644 if self._map.hastrackeddir(d):
629 break
645 break
630 entry = self._map.get(d)
646 entry = self._map.get(d)
631 if entry is not None and not entry.removed:
647 if entry is not None and not entry.removed:
632 msg = _(b'file %r in dirstate clashes with %r')
648 msg = _(b'file %r in dirstate clashes with %r')
633 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
649 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
634 raise error.Abort(msg)
650 raise error.Abort(msg)
635 self._dirty = True
651 self._dirty = True
636 self._updatedfiles.add(f)
652 self._updatedfiles.add(f)
637 self._map.addfile(
653 self._map.addfile(
638 f,
654 f,
639 mode=mode,
655 mode=mode,
640 size=size,
656 size=size,
641 mtime=mtime,
657 mtime=mtime,
642 added=added,
658 added=added,
643 merged=merged,
659 merged=merged,
644 from_p2=from_p2,
660 from_p2=from_p2,
645 possibly_dirty=possibly_dirty,
661 possibly_dirty=possibly_dirty,
646 )
662 )
647
663
648 def _get_filedata(self, filename):
664 def _get_filedata(self, filename):
649 """returns"""
665 """returns"""
650 s = os.lstat(self._join(filename))
666 s = os.lstat(self._join(filename))
651 mode = s.st_mode
667 mode = s.st_mode
652 size = s.st_size
668 size = s.st_size
653 mtime = s[stat.ST_MTIME]
669 mtime = s[stat.ST_MTIME]
654 return (mode, size, mtime)
670 return (mode, size, mtime)
655
671
656 def normal(self, f, parentfiledata=None):
672 def normal(self, f, parentfiledata=None):
657 """Mark a file normal and clean.
673 """Mark a file normal and clean.
658
674
659 parentfiledata: (mode, size, mtime) of the clean file
675 parentfiledata: (mode, size, mtime) of the clean file
660
676
661 parentfiledata should be computed from memory (for mode,
677 parentfiledata should be computed from memory (for mode,
662 size), as or close as possible from the point where we
678 size), as or close as possible from the point where we
663 determined the file was clean, to limit the risk of the
679 determined the file was clean, to limit the risk of the
664 file having been changed by an external process between the
680 file having been changed by an external process between the
665 moment where the file was determined to be clean and now."""
681 moment where the file was determined to be clean and now."""
666 if parentfiledata:
682 if parentfiledata:
667 (mode, size, mtime) = parentfiledata
683 (mode, size, mtime) = parentfiledata
668 else:
684 else:
669 (mode, size, mtime) = self._get_filedata(f)
685 (mode, size, mtime) = self._get_filedata(f)
670 self._addpath(f, mode=mode, size=size, mtime=mtime)
686 self._addpath(f, mode=mode, size=size, mtime=mtime)
671 self._map.copymap.pop(f, None)
687 self._map.copymap.pop(f, None)
672 if f in self._map.nonnormalset:
688 if f in self._map.nonnormalset:
673 self._map.nonnormalset.remove(f)
689 self._map.nonnormalset.remove(f)
674 if mtime > self._lastnormaltime:
690 if mtime > self._lastnormaltime:
675 # Remember the most recent modification timeslot for status(),
691 # Remember the most recent modification timeslot for status(),
676 # to make sure we won't miss future size-preserving file content
692 # to make sure we won't miss future size-preserving file content
677 # modifications that happen within the same timeslot.
693 # modifications that happen within the same timeslot.
678 self._lastnormaltime = mtime
694 self._lastnormaltime = mtime
679
695
680 def normallookup(self, f):
696 def normallookup(self, f):
681 '''Mark a file normal, but possibly dirty.'''
697 '''Mark a file normal, but possibly dirty.'''
682 if self.in_merge:
698 if self.in_merge:
683 # if there is a merge going on and the file was either
699 # if there is a merge going on and the file was either
684 # "merged" or coming from other parent (-2) before
700 # "merged" or coming from other parent (-2) before
685 # being removed, restore that state.
701 # being removed, restore that state.
686 entry = self._map.get(f)
702 entry = self._map.get(f)
687 if entry is not None:
703 if entry is not None:
688 # XXX this should probably be dealt with a a lower level
704 # XXX this should probably be dealt with a a lower level
689 # (see `merged_removed` and `from_p2_removed`)
705 # (see `merged_removed` and `from_p2_removed`)
690 if entry.merged_removed or entry.from_p2_removed:
706 if entry.merged_removed or entry.from_p2_removed:
691 source = self._map.copymap.get(f)
707 source = self._map.copymap.get(f)
692 if entry.merged_removed:
708 if entry.merged_removed:
693 self.merge(f)
709 self.merge(f)
694 elif entry.from_p2_removed:
710 elif entry.from_p2_removed:
695 self.otherparent(f)
711 self.otherparent(f)
696 if source is not None:
712 if source is not None:
697 self.copy(source, f)
713 self.copy(source, f)
698 return
714 return
699 elif entry.merged or entry.from_p2:
715 elif entry.merged or entry.from_p2:
700 return
716 return
701 self._addpath(f, possibly_dirty=True)
717 self._addpath(f, possibly_dirty=True)
702 self._map.copymap.pop(f, None)
718 self._map.copymap.pop(f, None)
703
719
704 def otherparent(self, f):
720 def otherparent(self, f):
705 '''Mark as coming from the other parent, always dirty.'''
721 '''Mark as coming from the other parent, always dirty.'''
706 if not self.in_merge:
722 if not self.in_merge:
707 msg = _(b"setting %r to other parent only allowed in merges") % f
723 msg = _(b"setting %r to other parent only allowed in merges") % f
708 raise error.Abort(msg)
724 raise error.Abort(msg)
709 entry = self._map.get(f)
725 entry = self._map.get(f)
710 if entry is not None and entry.tracked:
726 if entry is not None and entry.tracked:
711 # merge-like
727 # merge-like
712 self._addpath(f, merged=True)
728 self._addpath(f, merged=True)
713 else:
729 else:
714 # add-like
730 # add-like
715 self._addpath(f, from_p2=True)
731 self._addpath(f, from_p2=True)
716 self._map.copymap.pop(f, None)
732 self._map.copymap.pop(f, None)
717
733
718 def add(self, f):
734 def add(self, f):
719 '''Mark a file added.'''
735 '''Mark a file added.'''
720 if not self.pendingparentchange():
736 if not self.pendingparentchange():
721 util.nouideprecwarn(
737 util.nouideprecwarn(
722 b"do not use `add` outside of update/merge context."
738 b"do not use `add` outside of update/merge context."
723 b" Use `set_tracked`",
739 b" Use `set_tracked`",
724 b'6.0',
740 b'6.0',
725 stacklevel=2,
741 stacklevel=2,
726 )
742 )
727 self._add(f)
743 self._add(f)
728
744
729 def _add(self, filename):
745 def _add(self, filename):
730 """internal function to mark a file as added"""
746 """internal function to mark a file as added"""
731 self._addpath(filename, added=True)
747 self._addpath(filename, added=True)
732 self._map.copymap.pop(filename, None)
748 self._map.copymap.pop(filename, None)
733
749
734 def remove(self, f):
750 def remove(self, f):
735 '''Mark a file removed'''
751 '''Mark a file removed'''
736 if not self.pendingparentchange():
752 if not self.pendingparentchange():
737 util.nouideprecwarn(
753 util.nouideprecwarn(
738 b"do not use `remove` outside of update/merge context."
754 b"do not use `remove` outside of update/merge context."
739 b" Use `set_untracked`",
755 b" Use `set_untracked`",
740 b'6.0',
756 b'6.0',
741 stacklevel=2,
757 stacklevel=2,
742 )
758 )
743 self._remove(f)
759 self._remove(f)
744
760
745 def _remove(self, filename):
761 def _remove(self, filename):
746 """internal function to mark a file removed"""
762 """internal function to mark a file removed"""
747 self._dirty = True
763 self._dirty = True
748 self._updatedfiles.add(filename)
764 self._updatedfiles.add(filename)
749 self._map.removefile(filename, in_merge=self.in_merge)
765 self._map.removefile(filename, in_merge=self.in_merge)
750
766
751 def merge(self, f):
767 def merge(self, f):
752 '''Mark a file merged.'''
768 '''Mark a file merged.'''
753 if not self.in_merge:
769 if not self.in_merge:
754 return self.normallookup(f)
770 return self.normallookup(f)
755 return self.otherparent(f)
771 return self.otherparent(f)
756
772
757 def drop(self, f):
773 def drop(self, f):
758 '''Drop a file from the dirstate'''
774 '''Drop a file from the dirstate'''
759 if not self.pendingparentchange():
775 if not self.pendingparentchange():
760 util.nouideprecwarn(
776 util.nouideprecwarn(
761 b"do not use `drop` outside of update/merge context."
777 b"do not use `drop` outside of update/merge context."
762 b" Use `set_untracked`",
778 b" Use `set_untracked`",
763 b'6.0',
779 b'6.0',
764 stacklevel=2,
780 stacklevel=2,
765 )
781 )
766 self._drop(f)
782 self._drop(f)
767
783
768 def _drop(self, filename):
784 def _drop(self, filename):
769 """internal function to drop a file from the dirstate"""
785 """internal function to drop a file from the dirstate"""
770 if self._map.dropfile(filename):
786 if self._map.dropfile(filename):
771 self._dirty = True
787 self._dirty = True
772 self._updatedfiles.add(filename)
788 self._updatedfiles.add(filename)
773 self._map.copymap.pop(filename, None)
789 self._map.copymap.pop(filename, None)
774
790
775 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
791 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
776 if exists is None:
792 if exists is None:
777 exists = os.path.lexists(os.path.join(self._root, path))
793 exists = os.path.lexists(os.path.join(self._root, path))
778 if not exists:
794 if not exists:
779 # Maybe a path component exists
795 # Maybe a path component exists
780 if not ignoremissing and b'/' in path:
796 if not ignoremissing and b'/' in path:
781 d, f = path.rsplit(b'/', 1)
797 d, f = path.rsplit(b'/', 1)
782 d = self._normalize(d, False, ignoremissing, None)
798 d = self._normalize(d, False, ignoremissing, None)
783 folded = d + b"/" + f
799 folded = d + b"/" + f
784 else:
800 else:
785 # No path components, preserve original case
801 # No path components, preserve original case
786 folded = path
802 folded = path
787 else:
803 else:
788 # recursively normalize leading directory components
804 # recursively normalize leading directory components
789 # against dirstate
805 # against dirstate
790 if b'/' in normed:
806 if b'/' in normed:
791 d, f = normed.rsplit(b'/', 1)
807 d, f = normed.rsplit(b'/', 1)
792 d = self._normalize(d, False, ignoremissing, True)
808 d = self._normalize(d, False, ignoremissing, True)
793 r = self._root + b"/" + d
809 r = self._root + b"/" + d
794 folded = d + b"/" + util.fspath(f, r)
810 folded = d + b"/" + util.fspath(f, r)
795 else:
811 else:
796 folded = util.fspath(normed, self._root)
812 folded = util.fspath(normed, self._root)
797 storemap[normed] = folded
813 storemap[normed] = folded
798
814
799 return folded
815 return folded
800
816
801 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
817 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
802 normed = util.normcase(path)
818 normed = util.normcase(path)
803 folded = self._map.filefoldmap.get(normed, None)
819 folded = self._map.filefoldmap.get(normed, None)
804 if folded is None:
820 if folded is None:
805 if isknown:
821 if isknown:
806 folded = path
822 folded = path
807 else:
823 else:
808 folded = self._discoverpath(
824 folded = self._discoverpath(
809 path, normed, ignoremissing, exists, self._map.filefoldmap
825 path, normed, ignoremissing, exists, self._map.filefoldmap
810 )
826 )
811 return folded
827 return folded
812
828
813 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
829 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
814 normed = util.normcase(path)
830 normed = util.normcase(path)
815 folded = self._map.filefoldmap.get(normed, None)
831 folded = self._map.filefoldmap.get(normed, None)
816 if folded is None:
832 if folded is None:
817 folded = self._map.dirfoldmap.get(normed, None)
833 folded = self._map.dirfoldmap.get(normed, None)
818 if folded is None:
834 if folded is None:
819 if isknown:
835 if isknown:
820 folded = path
836 folded = path
821 else:
837 else:
822 # store discovered result in dirfoldmap so that future
838 # store discovered result in dirfoldmap so that future
823 # normalizefile calls don't start matching directories
839 # normalizefile calls don't start matching directories
824 folded = self._discoverpath(
840 folded = self._discoverpath(
825 path, normed, ignoremissing, exists, self._map.dirfoldmap
841 path, normed, ignoremissing, exists, self._map.dirfoldmap
826 )
842 )
827 return folded
843 return folded
828
844
829 def normalize(self, path, isknown=False, ignoremissing=False):
845 def normalize(self, path, isknown=False, ignoremissing=False):
830 """
846 """
831 normalize the case of a pathname when on a casefolding filesystem
847 normalize the case of a pathname when on a casefolding filesystem
832
848
833 isknown specifies whether the filename came from walking the
849 isknown specifies whether the filename came from walking the
834 disk, to avoid extra filesystem access.
850 disk, to avoid extra filesystem access.
835
851
836 If ignoremissing is True, missing path are returned
852 If ignoremissing is True, missing path are returned
837 unchanged. Otherwise, we try harder to normalize possibly
853 unchanged. Otherwise, we try harder to normalize possibly
838 existing path components.
854 existing path components.
839
855
840 The normalized case is determined based on the following precedence:
856 The normalized case is determined based on the following precedence:
841
857
842 - version of name already stored in the dirstate
858 - version of name already stored in the dirstate
843 - version of name stored on disk
859 - version of name stored on disk
844 - version provided via command arguments
860 - version provided via command arguments
845 """
861 """
846
862
847 if self._checkcase:
863 if self._checkcase:
848 return self._normalize(path, isknown, ignoremissing)
864 return self._normalize(path, isknown, ignoremissing)
849 return path
865 return path
850
866
851 def clear(self):
867 def clear(self):
852 self._map.clear()
868 self._map.clear()
853 self._lastnormaltime = 0
869 self._lastnormaltime = 0
854 self._updatedfiles.clear()
870 self._updatedfiles.clear()
855 self._dirty = True
871 self._dirty = True
856
872
857 def rebuild(self, parent, allfiles, changedfiles=None):
873 def rebuild(self, parent, allfiles, changedfiles=None):
858 if changedfiles is None:
874 if changedfiles is None:
859 # Rebuild entire dirstate
875 # Rebuild entire dirstate
860 to_lookup = allfiles
876 to_lookup = allfiles
861 to_drop = []
877 to_drop = []
862 lastnormaltime = self._lastnormaltime
878 lastnormaltime = self._lastnormaltime
863 self.clear()
879 self.clear()
864 self._lastnormaltime = lastnormaltime
880 self._lastnormaltime = lastnormaltime
865 elif len(changedfiles) < 10:
881 elif len(changedfiles) < 10:
866 # Avoid turning allfiles into a set, which can be expensive if it's
882 # Avoid turning allfiles into a set, which can be expensive if it's
867 # large.
883 # large.
868 to_lookup = []
884 to_lookup = []
869 to_drop = []
885 to_drop = []
870 for f in changedfiles:
886 for f in changedfiles:
871 if f in allfiles:
887 if f in allfiles:
872 to_lookup.append(f)
888 to_lookup.append(f)
873 else:
889 else:
874 to_drop.append(f)
890 to_drop.append(f)
875 else:
891 else:
876 changedfilesset = set(changedfiles)
892 changedfilesset = set(changedfiles)
877 to_lookup = changedfilesset & set(allfiles)
893 to_lookup = changedfilesset & set(allfiles)
878 to_drop = changedfilesset - to_lookup
894 to_drop = changedfilesset - to_lookup
879
895
880 if self._origpl is None:
896 if self._origpl is None:
881 self._origpl = self._pl
897 self._origpl = self._pl
882 self._map.setparents(parent, self._nodeconstants.nullid)
898 self._map.setparents(parent, self._nodeconstants.nullid)
883
899
884 for f in to_lookup:
900 for f in to_lookup:
885 self.normallookup(f)
901 self.normallookup(f)
886 for f in to_drop:
902 for f in to_drop:
887 self._drop(f)
903 self._drop(f)
888
904
889 self._dirty = True
905 self._dirty = True
890
906
891 def identity(self):
907 def identity(self):
892 """Return identity of dirstate itself to detect changing in storage
908 """Return identity of dirstate itself to detect changing in storage
893
909
894 If identity of previous dirstate is equal to this, writing
910 If identity of previous dirstate is equal to this, writing
895 changes based on the former dirstate out can keep consistency.
911 changes based on the former dirstate out can keep consistency.
896 """
912 """
897 return self._map.identity
913 return self._map.identity
898
914
899 def write(self, tr):
915 def write(self, tr):
900 if not self._dirty:
916 if not self._dirty:
901 return
917 return
902
918
903 filename = self._filename
919 filename = self._filename
904 if tr:
920 if tr:
905 # 'dirstate.write()' is not only for writing in-memory
921 # 'dirstate.write()' is not only for writing in-memory
906 # changes out, but also for dropping ambiguous timestamp.
922 # changes out, but also for dropping ambiguous timestamp.
907 # delayed writing re-raise "ambiguous timestamp issue".
923 # delayed writing re-raise "ambiguous timestamp issue".
908 # See also the wiki page below for detail:
924 # See also the wiki page below for detail:
909 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
925 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
910
926
911 # emulate dropping timestamp in 'parsers.pack_dirstate'
927 # emulate dropping timestamp in 'parsers.pack_dirstate'
912 now = _getfsnow(self._opener)
928 now = _getfsnow(self._opener)
913 self._map.clearambiguoustimes(self._updatedfiles, now)
929 self._map.clearambiguoustimes(self._updatedfiles, now)
914
930
915 # emulate that all 'dirstate.normal' results are written out
931 # emulate that all 'dirstate.normal' results are written out
916 self._lastnormaltime = 0
932 self._lastnormaltime = 0
917 self._updatedfiles.clear()
933 self._updatedfiles.clear()
918
934
919 # delay writing in-memory changes out
935 # delay writing in-memory changes out
920 tr.addfilegenerator(
936 tr.addfilegenerator(
921 b'dirstate',
937 b'dirstate',
922 (self._filename,),
938 (self._filename,),
923 lambda f: self._writedirstate(tr, f),
939 lambda f: self._writedirstate(tr, f),
924 location=b'plain',
940 location=b'plain',
925 )
941 )
926 return
942 return
927
943
928 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
944 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
929 self._writedirstate(tr, st)
945 self._writedirstate(tr, st)
930
946
931 def addparentchangecallback(self, category, callback):
947 def addparentchangecallback(self, category, callback):
932 """add a callback to be called when the wd parents are changed
948 """add a callback to be called when the wd parents are changed
933
949
934 Callback will be called with the following arguments:
950 Callback will be called with the following arguments:
935 dirstate, (oldp1, oldp2), (newp1, newp2)
951 dirstate, (oldp1, oldp2), (newp1, newp2)
936
952
937 Category is a unique identifier to allow overwriting an old callback
953 Category is a unique identifier to allow overwriting an old callback
938 with a newer callback.
954 with a newer callback.
939 """
955 """
940 self._plchangecallbacks[category] = callback
956 self._plchangecallbacks[category] = callback
941
957
942 def _writedirstate(self, tr, st):
958 def _writedirstate(self, tr, st):
943 # notify callbacks about parents change
959 # notify callbacks about parents change
944 if self._origpl is not None and self._origpl != self._pl:
960 if self._origpl is not None and self._origpl != self._pl:
945 for c, callback in sorted(
961 for c, callback in sorted(
946 pycompat.iteritems(self._plchangecallbacks)
962 pycompat.iteritems(self._plchangecallbacks)
947 ):
963 ):
948 callback(self, self._origpl, self._pl)
964 callback(self, self._origpl, self._pl)
949 self._origpl = None
965 self._origpl = None
950 # use the modification time of the newly created temporary file as the
966 # use the modification time of the newly created temporary file as the
951 # filesystem's notion of 'now'
967 # filesystem's notion of 'now'
952 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
968 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
953
969
954 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
970 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
955 # timestamp of each entries in dirstate, because of 'now > mtime'
971 # timestamp of each entries in dirstate, because of 'now > mtime'
956 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
972 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
957 if delaywrite > 0:
973 if delaywrite > 0:
958 # do we have any files to delay for?
974 # do we have any files to delay for?
959 for f, e in pycompat.iteritems(self._map):
975 for f, e in pycompat.iteritems(self._map):
960 if e.need_delay(now):
976 if e.need_delay(now):
961 import time # to avoid useless import
977 import time # to avoid useless import
962
978
963 # rather than sleep n seconds, sleep until the next
979 # rather than sleep n seconds, sleep until the next
964 # multiple of n seconds
980 # multiple of n seconds
965 clock = time.time()
981 clock = time.time()
966 start = int(clock) - (int(clock) % delaywrite)
982 start = int(clock) - (int(clock) % delaywrite)
967 end = start + delaywrite
983 end = start + delaywrite
968 time.sleep(end - clock)
984 time.sleep(end - clock)
969 now = end # trust our estimate that the end is near now
985 now = end # trust our estimate that the end is near now
970 break
986 break
971
987
972 self._map.write(tr, st, now)
988 self._map.write(tr, st, now)
973 self._lastnormaltime = 0
989 self._lastnormaltime = 0
974 self._dirty = False
990 self._dirty = False
975
991
976 def _dirignore(self, f):
992 def _dirignore(self, f):
977 if self._ignore(f):
993 if self._ignore(f):
978 return True
994 return True
979 for p in pathutil.finddirs(f):
995 for p in pathutil.finddirs(f):
980 if self._ignore(p):
996 if self._ignore(p):
981 return True
997 return True
982 return False
998 return False
983
999
984 def _ignorefiles(self):
1000 def _ignorefiles(self):
985 files = []
1001 files = []
986 if os.path.exists(self._join(b'.hgignore')):
1002 if os.path.exists(self._join(b'.hgignore')):
987 files.append(self._join(b'.hgignore'))
1003 files.append(self._join(b'.hgignore'))
988 for name, path in self._ui.configitems(b"ui"):
1004 for name, path in self._ui.configitems(b"ui"):
989 if name == b'ignore' or name.startswith(b'ignore.'):
1005 if name == b'ignore' or name.startswith(b'ignore.'):
990 # we need to use os.path.join here rather than self._join
1006 # we need to use os.path.join here rather than self._join
991 # because path is arbitrary and user-specified
1007 # because path is arbitrary and user-specified
992 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1008 files.append(os.path.join(self._rootdir, util.expandpath(path)))
993 return files
1009 return files
994
1010
995 def _ignorefileandline(self, f):
1011 def _ignorefileandline(self, f):
996 files = collections.deque(self._ignorefiles())
1012 files = collections.deque(self._ignorefiles())
997 visited = set()
1013 visited = set()
998 while files:
1014 while files:
999 i = files.popleft()
1015 i = files.popleft()
1000 patterns = matchmod.readpatternfile(
1016 patterns = matchmod.readpatternfile(
1001 i, self._ui.warn, sourceinfo=True
1017 i, self._ui.warn, sourceinfo=True
1002 )
1018 )
1003 for pattern, lineno, line in patterns:
1019 for pattern, lineno, line in patterns:
1004 kind, p = matchmod._patsplit(pattern, b'glob')
1020 kind, p = matchmod._patsplit(pattern, b'glob')
1005 if kind == b"subinclude":
1021 if kind == b"subinclude":
1006 if p not in visited:
1022 if p not in visited:
1007 files.append(p)
1023 files.append(p)
1008 continue
1024 continue
1009 m = matchmod.match(
1025 m = matchmod.match(
1010 self._root, b'', [], [pattern], warn=self._ui.warn
1026 self._root, b'', [], [pattern], warn=self._ui.warn
1011 )
1027 )
1012 if m(f):
1028 if m(f):
1013 return (i, lineno, line)
1029 return (i, lineno, line)
1014 visited.add(i)
1030 visited.add(i)
1015 return (None, -1, b"")
1031 return (None, -1, b"")
1016
1032
1017 def _walkexplicit(self, match, subrepos):
1033 def _walkexplicit(self, match, subrepos):
1018 """Get stat data about the files explicitly specified by match.
1034 """Get stat data about the files explicitly specified by match.
1019
1035
1020 Return a triple (results, dirsfound, dirsnotfound).
1036 Return a triple (results, dirsfound, dirsnotfound).
1021 - results is a mapping from filename to stat result. It also contains
1037 - results is a mapping from filename to stat result. It also contains
1022 listings mapping subrepos and .hg to None.
1038 listings mapping subrepos and .hg to None.
1023 - dirsfound is a list of files found to be directories.
1039 - dirsfound is a list of files found to be directories.
1024 - dirsnotfound is a list of files that the dirstate thinks are
1040 - dirsnotfound is a list of files that the dirstate thinks are
1025 directories and that were not found."""
1041 directories and that were not found."""
1026
1042
1027 def badtype(mode):
1043 def badtype(mode):
1028 kind = _(b'unknown')
1044 kind = _(b'unknown')
1029 if stat.S_ISCHR(mode):
1045 if stat.S_ISCHR(mode):
1030 kind = _(b'character device')
1046 kind = _(b'character device')
1031 elif stat.S_ISBLK(mode):
1047 elif stat.S_ISBLK(mode):
1032 kind = _(b'block device')
1048 kind = _(b'block device')
1033 elif stat.S_ISFIFO(mode):
1049 elif stat.S_ISFIFO(mode):
1034 kind = _(b'fifo')
1050 kind = _(b'fifo')
1035 elif stat.S_ISSOCK(mode):
1051 elif stat.S_ISSOCK(mode):
1036 kind = _(b'socket')
1052 kind = _(b'socket')
1037 elif stat.S_ISDIR(mode):
1053 elif stat.S_ISDIR(mode):
1038 kind = _(b'directory')
1054 kind = _(b'directory')
1039 return _(b'unsupported file type (type is %s)') % kind
1055 return _(b'unsupported file type (type is %s)') % kind
1040
1056
1041 badfn = match.bad
1057 badfn = match.bad
1042 dmap = self._map
1058 dmap = self._map
1043 lstat = os.lstat
1059 lstat = os.lstat
1044 getkind = stat.S_IFMT
1060 getkind = stat.S_IFMT
1045 dirkind = stat.S_IFDIR
1061 dirkind = stat.S_IFDIR
1046 regkind = stat.S_IFREG
1062 regkind = stat.S_IFREG
1047 lnkkind = stat.S_IFLNK
1063 lnkkind = stat.S_IFLNK
1048 join = self._join
1064 join = self._join
1049 dirsfound = []
1065 dirsfound = []
1050 foundadd = dirsfound.append
1066 foundadd = dirsfound.append
1051 dirsnotfound = []
1067 dirsnotfound = []
1052 notfoundadd = dirsnotfound.append
1068 notfoundadd = dirsnotfound.append
1053
1069
1054 if not match.isexact() and self._checkcase:
1070 if not match.isexact() and self._checkcase:
1055 normalize = self._normalize
1071 normalize = self._normalize
1056 else:
1072 else:
1057 normalize = None
1073 normalize = None
1058
1074
1059 files = sorted(match.files())
1075 files = sorted(match.files())
1060 subrepos.sort()
1076 subrepos.sort()
1061 i, j = 0, 0
1077 i, j = 0, 0
1062 while i < len(files) and j < len(subrepos):
1078 while i < len(files) and j < len(subrepos):
1063 subpath = subrepos[j] + b"/"
1079 subpath = subrepos[j] + b"/"
1064 if files[i] < subpath:
1080 if files[i] < subpath:
1065 i += 1
1081 i += 1
1066 continue
1082 continue
1067 while i < len(files) and files[i].startswith(subpath):
1083 while i < len(files) and files[i].startswith(subpath):
1068 del files[i]
1084 del files[i]
1069 j += 1
1085 j += 1
1070
1086
1071 if not files or b'' in files:
1087 if not files or b'' in files:
1072 files = [b'']
1088 files = [b'']
1073 # constructing the foldmap is expensive, so don't do it for the
1089 # constructing the foldmap is expensive, so don't do it for the
1074 # common case where files is ['']
1090 # common case where files is ['']
1075 normalize = None
1091 normalize = None
1076 results = dict.fromkeys(subrepos)
1092 results = dict.fromkeys(subrepos)
1077 results[b'.hg'] = None
1093 results[b'.hg'] = None
1078
1094
1079 for ff in files:
1095 for ff in files:
1080 if normalize:
1096 if normalize:
1081 nf = normalize(ff, False, True)
1097 nf = normalize(ff, False, True)
1082 else:
1098 else:
1083 nf = ff
1099 nf = ff
1084 if nf in results:
1100 if nf in results:
1085 continue
1101 continue
1086
1102
1087 try:
1103 try:
1088 st = lstat(join(nf))
1104 st = lstat(join(nf))
1089 kind = getkind(st.st_mode)
1105 kind = getkind(st.st_mode)
1090 if kind == dirkind:
1106 if kind == dirkind:
1091 if nf in dmap:
1107 if nf in dmap:
1092 # file replaced by dir on disk but still in dirstate
1108 # file replaced by dir on disk but still in dirstate
1093 results[nf] = None
1109 results[nf] = None
1094 foundadd((nf, ff))
1110 foundadd((nf, ff))
1095 elif kind == regkind or kind == lnkkind:
1111 elif kind == regkind or kind == lnkkind:
1096 results[nf] = st
1112 results[nf] = st
1097 else:
1113 else:
1098 badfn(ff, badtype(kind))
1114 badfn(ff, badtype(kind))
1099 if nf in dmap:
1115 if nf in dmap:
1100 results[nf] = None
1116 results[nf] = None
1101 except OSError as inst: # nf not found on disk - it is dirstate only
1117 except OSError as inst: # nf not found on disk - it is dirstate only
1102 if nf in dmap: # does it exactly match a missing file?
1118 if nf in dmap: # does it exactly match a missing file?
1103 results[nf] = None
1119 results[nf] = None
1104 else: # does it match a missing directory?
1120 else: # does it match a missing directory?
1105 if self._map.hasdir(nf):
1121 if self._map.hasdir(nf):
1106 notfoundadd(nf)
1122 notfoundadd(nf)
1107 else:
1123 else:
1108 badfn(ff, encoding.strtolocal(inst.strerror))
1124 badfn(ff, encoding.strtolocal(inst.strerror))
1109
1125
1110 # match.files() may contain explicitly-specified paths that shouldn't
1126 # match.files() may contain explicitly-specified paths that shouldn't
1111 # be taken; drop them from the list of files found. dirsfound/notfound
1127 # be taken; drop them from the list of files found. dirsfound/notfound
1112 # aren't filtered here because they will be tested later.
1128 # aren't filtered here because they will be tested later.
1113 if match.anypats():
1129 if match.anypats():
1114 for f in list(results):
1130 for f in list(results):
1115 if f == b'.hg' or f in subrepos:
1131 if f == b'.hg' or f in subrepos:
1116 # keep sentinel to disable further out-of-repo walks
1132 # keep sentinel to disable further out-of-repo walks
1117 continue
1133 continue
1118 if not match(f):
1134 if not match(f):
1119 del results[f]
1135 del results[f]
1120
1136
1121 # Case insensitive filesystems cannot rely on lstat() failing to detect
1137 # Case insensitive filesystems cannot rely on lstat() failing to detect
1122 # a case-only rename. Prune the stat object for any file that does not
1138 # a case-only rename. Prune the stat object for any file that does not
1123 # match the case in the filesystem, if there are multiple files that
1139 # match the case in the filesystem, if there are multiple files that
1124 # normalize to the same path.
1140 # normalize to the same path.
1125 if match.isexact() and self._checkcase:
1141 if match.isexact() and self._checkcase:
1126 normed = {}
1142 normed = {}
1127
1143
1128 for f, st in pycompat.iteritems(results):
1144 for f, st in pycompat.iteritems(results):
1129 if st is None:
1145 if st is None:
1130 continue
1146 continue
1131
1147
1132 nc = util.normcase(f)
1148 nc = util.normcase(f)
1133 paths = normed.get(nc)
1149 paths = normed.get(nc)
1134
1150
1135 if paths is None:
1151 if paths is None:
1136 paths = set()
1152 paths = set()
1137 normed[nc] = paths
1153 normed[nc] = paths
1138
1154
1139 paths.add(f)
1155 paths.add(f)
1140
1156
1141 for norm, paths in pycompat.iteritems(normed):
1157 for norm, paths in pycompat.iteritems(normed):
1142 if len(paths) > 1:
1158 if len(paths) > 1:
1143 for path in paths:
1159 for path in paths:
1144 folded = self._discoverpath(
1160 folded = self._discoverpath(
1145 path, norm, True, None, self._map.dirfoldmap
1161 path, norm, True, None, self._map.dirfoldmap
1146 )
1162 )
1147 if path != folded:
1163 if path != folded:
1148 results[path] = None
1164 results[path] = None
1149
1165
1150 return results, dirsfound, dirsnotfound
1166 return results, dirsfound, dirsnotfound
1151
1167
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        A value of None in the returned dict means the file is known to
        the dirstate (or is a sentinel such as b'.hg') but no fresh stat
        information is available or needed for it.
        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Select ignore predicates based on which file classes the caller
        # wants reported; util.never/util.always short-circuit whole steps.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Bind frequently-used attributes and functions to locals: this loop
        # is hot, and local (LOAD_FAST) lookups are measurably cheaper.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        # Case-insensitive filesystems need path normalization before
        # comparing against dirstate entries; otherwise skip that cost.
        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # Iterative depth-first traversal; ``work`` doubles as the stack.
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # Unreadable/vanished directories are reported via
                    # match.bad rather than aborting the whole walk.
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                # file replaced by dir on disk but still
                                # present in the dirstate
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # neither dir nor regular file/symlink (e.g. a
                            # fifo) but tracked: report without stat data
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # Drop the sentinels added to stop out-of-repo walks.
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1339
1355
1340 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1356 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1341 # Force Rayon (Rust parallelism library) to respect the number of
1357 # Force Rayon (Rust parallelism library) to respect the number of
1342 # workers. This is a temporary workaround until Rust code knows
1358 # workers. This is a temporary workaround until Rust code knows
1343 # how to read the config file.
1359 # how to read the config file.
1344 numcpus = self._ui.configint(b"worker", b"numcpus")
1360 numcpus = self._ui.configint(b"worker", b"numcpus")
1345 if numcpus is not None:
1361 if numcpus is not None:
1346 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1362 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1347
1363
1348 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1364 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1349 if not workers_enabled:
1365 if not workers_enabled:
1350 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1366 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1351
1367
1352 (
1368 (
1353 lookup,
1369 lookup,
1354 modified,
1370 modified,
1355 added,
1371 added,
1356 removed,
1372 removed,
1357 deleted,
1373 deleted,
1358 clean,
1374 clean,
1359 ignored,
1375 ignored,
1360 unknown,
1376 unknown,
1361 warnings,
1377 warnings,
1362 bad,
1378 bad,
1363 traversed,
1379 traversed,
1364 dirty,
1380 dirty,
1365 ) = rustmod.status(
1381 ) = rustmod.status(
1366 self._map._rustmap,
1382 self._map._rustmap,
1367 matcher,
1383 matcher,
1368 self._rootdir,
1384 self._rootdir,
1369 self._ignorefiles(),
1385 self._ignorefiles(),
1370 self._checkexec,
1386 self._checkexec,
1371 self._lastnormaltime,
1387 self._lastnormaltime,
1372 bool(list_clean),
1388 bool(list_clean),
1373 bool(list_ignored),
1389 bool(list_ignored),
1374 bool(list_unknown),
1390 bool(list_unknown),
1375 bool(matcher.traversedir),
1391 bool(matcher.traversedir),
1376 )
1392 )
1377
1393
1378 self._dirty |= dirty
1394 self._dirty |= dirty
1379
1395
1380 if matcher.traversedir:
1396 if matcher.traversedir:
1381 for dir in traversed:
1397 for dir in traversed:
1382 matcher.traversedir(dir)
1398 matcher.traversedir(dir)
1383
1399
1384 if self._ui.warn:
1400 if self._ui.warn:
1385 for item in warnings:
1401 for item in warnings:
1386 if isinstance(item, tuple):
1402 if isinstance(item, tuple):
1387 file_path, syntax = item
1403 file_path, syntax = item
1388 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1404 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1389 file_path,
1405 file_path,
1390 syntax,
1406 syntax,
1391 )
1407 )
1392 self._ui.warn(msg)
1408 self._ui.warn(msg)
1393 else:
1409 else:
1394 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1410 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1395 self._ui.warn(
1411 self._ui.warn(
1396 msg
1412 msg
1397 % (
1413 % (
1398 pathutil.canonpath(
1414 pathutil.canonpath(
1399 self._rootdir, self._rootdir, item
1415 self._rootdir, self._rootdir, item
1400 ),
1416 ),
1401 b"No such file or directory",
1417 b"No such file or directory",
1402 )
1418 )
1403 )
1419 )
1404
1420
1405 for (fn, message) in bad:
1421 for (fn, message) in bad:
1406 matcher.bad(fn, encoding.strtolocal(message))
1422 matcher.bad(fn, encoding.strtolocal(message))
1407
1423
1408 status = scmutil.status(
1424 status = scmutil.status(
1409 modified=modified,
1425 modified=modified,
1410 added=added,
1426 added=added,
1411 removed=removed,
1427 removed=removed,
1412 deleted=deleted,
1428 deleted=deleted,
1413 unknown=unknown,
1429 unknown=unknown,
1414 ignored=ignored,
1430 ignored=ignored,
1415 clean=clean,
1431 clean=clean,
1416 )
1432 )
1417 return (lookup, status)
1433 return (lookup, status)
1418
1434
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # Preserve the caller's flags before the names are reused below as
        # the result accumulators.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # Try the Rust fast path unless one of its known limitations applies.
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # the Rust implementation declined; fall through to Python
                pass

        def noop(f):
            pass

        # Bind bound methods to locals: these are called once per walked
        # file, so attribute-lookup cost matters here.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file on disk but not in the dirstate: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                # tracked but absent from disk (walk returned no stat)
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1559
1575
1560 def matches(self, match):
1576 def matches(self, match):
1561 """
1577 """
1562 return files in the dirstate (in whatever state) filtered by match
1578 return files in the dirstate (in whatever state) filtered by match
1563 """
1579 """
1564 dmap = self._map
1580 dmap = self._map
1565 if rustmod is not None:
1581 if rustmod is not None:
1566 dmap = self._map._rustmap
1582 dmap = self._map._rustmap
1567
1583
1568 if match.always():
1584 if match.always():
1569 return dmap.keys()
1585 return dmap.keys()
1570 files = match.files()
1586 files = match.files()
1571 if match.isexact():
1587 if match.isexact():
1572 # fast path -- filter the other way around, since typically files is
1588 # fast path -- filter the other way around, since typically files is
1573 # much smaller than dmap
1589 # much smaller than dmap
1574 return [f for f in files if f in dmap]
1590 return [f for f in files if f in dmap]
1575 if match.prefix() and all(fn in dmap for fn in files):
1591 if match.prefix() and all(fn in dmap for fn in files):
1576 # fast path -- all the values are known to be files, so just return
1592 # fast path -- all the values are known to be files, so just return
1577 # that
1593 # that
1578 return list(files)
1594 return list(files)
1579 return [f for f in dmap if match(f)]
1595 return [f for f in dmap if match(f)]
1580
1596
1581 def _actualfilename(self, tr):
1597 def _actualfilename(self, tr):
1582 if tr:
1598 if tr:
1583 return self._pendingfilename
1599 return self._pendingfilename
1584 else:
1600 else:
1585 return self._filename
1601 return self._filename
1586
1602
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` is the active transaction (or None); it decides which file
        (pending or regular) the state is flushed to before being copied
        to ``backupname``.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # remove any stale backup before creating the new one
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1625
1641
1626 def restorebackup(self, tr, backupname):
1642 def restorebackup(self, tr, backupname):
1627 '''Restore dirstate by backup file'''
1643 '''Restore dirstate by backup file'''
1628 # this "invalidate()" prevents "wlock.release()" from writing
1644 # this "invalidate()" prevents "wlock.release()" from writing
1629 # changes of dirstate out after restoring from backup file
1645 # changes of dirstate out after restoring from backup file
1630 self.invalidate()
1646 self.invalidate()
1631 filename = self._actualfilename(tr)
1647 filename = self._actualfilename(tr)
1632 o = self._opener
1648 o = self._opener
1633 if util.samefile(o.join(backupname), o.join(filename)):
1649 if util.samefile(o.join(backupname), o.join(filename)):
1634 o.unlink(backupname)
1650 o.unlink(backupname)
1635 else:
1651 else:
1636 o.rename(backupname, filename, checkambig=True)
1652 o.rename(backupname, filename, checkambig=True)
1637
1653
1638 def clearbackup(self, tr, backupname):
1654 def clearbackup(self, tr, backupname):
1639 '''Clear backup file'''
1655 '''Clear backup file'''
1640 self._opener.unlink(backupname)
1656 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now