##// END OF EJS Templates
dirstate: drop the `_normal` method...
marmoute -
r48728:1b3c753b default
parent child Browse files
Show More
@@ -1,1637 +1,1634 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
51 class repocache(filecache):
51 class repocache(filecache):
52 """filecache for files in .hg/"""
52 """filecache for files in .hg/"""
53
53
54 def join(self, obj, fname):
54 def join(self, obj, fname):
55 return obj._opener.join(fname)
55 return obj._opener.join(fname)
56
56
57
57
58 class rootcache(filecache):
58 class rootcache(filecache):
59 """filecache for files in the repository root"""
59 """filecache for files in the repository root"""
60
60
61 def join(self, obj, fname):
61 def join(self, obj, fname):
62 return obj._join(fname)
62 return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
75 def requires_parents_change(func):
75 def requires_parents_change(func):
76 def wrap(self, *args, **kwargs):
76 def wrap(self, *args, **kwargs):
77 if not self.pendingparentchange():
77 if not self.pendingparentchange():
78 msg = 'calling `%s` outside of a parentchange context'
78 msg = 'calling `%s` outside of a parentchange context'
79 msg %= func.__name__
79 msg %= func.__name__
80 raise error.ProgrammingError(msg)
80 raise error.ProgrammingError(msg)
81 return func(self, *args, **kwargs)
81 return func(self, *args, **kwargs)
82
82
83 return wrap
83 return wrap
84
84
85
85
86 def requires_no_parents_change(func):
86 def requires_no_parents_change(func):
87 def wrap(self, *args, **kwargs):
87 def wrap(self, *args, **kwargs):
88 if self.pendingparentchange():
88 if self.pendingparentchange():
89 msg = 'calling `%s` inside of a parentchange context'
89 msg = 'calling `%s` inside of a parentchange context'
90 msg %= func.__name__
90 msg %= func.__name__
91 raise error.ProgrammingError(msg)
91 raise error.ProgrammingError(msg)
92 return func(self, *args, **kwargs)
92 return func(self, *args, **kwargs)
93
93
94 return wrap
94 return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._updatedfiles = set()
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
242 if self._checklink and self._checkexec:
243
243
244 def f(x):
244 def f(x):
245 try:
245 try:
246 st = os.lstat(self._join(x))
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
247 if util.statislink(st):
248 return b'l'
248 return b'l'
249 if util.statisexec(st):
249 if util.statisexec(st):
250 return b'x'
250 return b'x'
251 except OSError:
251 except OSError:
252 pass
252 pass
253 return b''
253 return b''
254
254
255 return f
255 return f
256
256
257 fallback = buildfallback()
257 fallback = buildfallback()
258 if self._checklink:
258 if self._checklink:
259
259
260 def f(x):
260 def f(x):
261 if os.path.islink(self._join(x)):
261 if os.path.islink(self._join(x)):
262 return b'l'
262 return b'l'
263 if b'x' in fallback(x):
263 if b'x' in fallback(x):
264 return b'x'
264 return b'x'
265 return b''
265 return b''
266
266
267 return f
267 return f
268 if self._checkexec:
268 if self._checkexec:
269
269
270 def f(x):
270 def f(x):
271 if b'l' in fallback(x):
271 if b'l' in fallback(x):
272 return b'l'
272 return b'l'
273 if util.isexec(self._join(x)):
273 if util.isexec(self._join(x)):
274 return b'x'
274 return b'x'
275 return b''
275 return b''
276
276
277 return f
277 return f
278 else:
278 else:
279 return fallback
279 return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries a
370 When moving from two parents to one, "merged" entries a
371 adjusted to normal and previous copy records discarded and
371 adjusted to normal and previous copy records discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 self._map.setparents(p1, p2)
388 self._map.setparents(p1, p2)
389 copies = {}
389 copies = {}
390 if (
390 if (
391 oldp2 != self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
395
396 for f in candidatefiles:
396 for f in candidatefiles:
397 s = self._map.get(f)
397 s = self._map.get(f)
398 if s is None:
398 if s is None:
399 continue
399 continue
400
400
401 # Discard "merged" markers when moving away from a merge state
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
402 if s.merged:
403 source = self._map.copymap.get(f)
403 source = self._map.copymap.get(f)
404 if source:
404 if source:
405 copies[f] = source
405 copies[f] = source
406 self._normallookup(f)
406 self._normallookup(f)
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._add(f)
413 return copies
413 return copies
414
414
415 def setbranch(self, branch):
415 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 try:
418 try:
419 f.write(self._branch + b'\n')
419 f.write(self._branch + b'\n')
420 f.close()
420 f.close()
421
421
422 # make sure filecache has the correct stat info for _branch after
422 # make sure filecache has the correct stat info for _branch after
423 # replacing the underlying file
423 # replacing the underlying file
424 ce = self._filecache[b'_branch']
424 ce = self._filecache[b'_branch']
425 if ce:
425 if ce:
426 ce.refresh()
426 ce.refresh()
427 except: # re-raises
427 except: # re-raises
428 f.discard()
428 f.discard()
429 raise
429 raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of "update/merge" case. For
469 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True the file was previously untracked, False otherwise.
472 return True the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self._normallookup(filename)
479 self._normallookup(filename)
480 return True
480 return True
481 # XXX This is probably overkill for more case, but we need this to
481 # XXX This is probably overkill for more case, but we need this to
482 # fully replace the `normallookup` call with `set_tracked` one.
482 # fully replace the `normallookup` call with `set_tracked` one.
483 # Consider smoothing this in the future.
483 # Consider smoothing this in the future.
484 self.set_possibly_dirty(filename)
484 self.set_possibly_dirty(filename)
485 return False
485 return False
486
486
487 @requires_no_parents_change
487 @requires_no_parents_change
488 def set_untracked(self, filename):
488 def set_untracked(self, filename):
489 """a "public" method for generic code to mark a file as untracked
489 """a "public" method for generic code to mark a file as untracked
490
490
491 This function is to be called outside of "update/merge" case. For
491 This function is to be called outside of "update/merge" case. For
492 example by a command like `hg remove X`.
492 example by a command like `hg remove X`.
493
493
494 return True the file was previously tracked, False otherwise.
494 return True the file was previously tracked, False otherwise.
495 """
495 """
496 entry = self._map.get(filename)
496 entry = self._map.get(filename)
497 if entry is None:
497 if entry is None:
498 return False
498 return False
499 elif entry.added:
499 elif entry.added:
500 self._drop(filename)
500 self._drop(filename)
501 return True
501 return True
502 else:
502 else:
503 self._dirty = True
503 self._dirty = True
504 self._updatedfiles.add(filename)
504 self._updatedfiles.add(filename)
505 self._map.set_untracked(filename)
505 self._map.set_untracked(filename)
506 return True
506 return True
507
507
508 @requires_no_parents_change
508 @requires_no_parents_change
509 def set_clean(self, filename, parentfiledata=None):
509 def set_clean(self, filename, parentfiledata=None):
510 """record that the current state of the file on disk is known to be clean"""
510 """record that the current state of the file on disk is known to be clean"""
511 self._dirty = True
511 self._dirty = True
512 self._updatedfiles.add(filename)
512 self._updatedfiles.add(filename)
513 self._normal(filename, parentfiledata=parentfiledata)
513 if parentfiledata:
514 (mode, size, mtime) = parentfiledata
515 else:
516 (mode, size, mtime) = self._get_filedata(filename)
517 self._addpath(filename, mode=mode, size=size, mtime=mtime)
518 self._map.copymap.pop(filename, None)
519 if filename in self._map.nonnormalset:
520 self._map.nonnormalset.remove(filename)
521 if mtime > self._lastnormaltime:
522 # Remember the most recent modification timeslot for status(),
523 # to make sure we won't miss future size-preserving file content
524 # modifications that happen within the same timeslot.
525 self._lastnormaltime = mtime
514
526
515 @requires_no_parents_change
527 @requires_no_parents_change
516 def set_possibly_dirty(self, filename):
528 def set_possibly_dirty(self, filename):
517 """record that the current state of the file on disk is unknown"""
529 """record that the current state of the file on disk is unknown"""
518 self._dirty = True
530 self._dirty = True
519 self._updatedfiles.add(filename)
531 self._updatedfiles.add(filename)
520 self._map.set_possibly_dirty(filename)
532 self._map.set_possibly_dirty(filename)
521
533
522 @requires_parents_change
534 @requires_parents_change
523 def update_file_p1(
535 def update_file_p1(
524 self,
536 self,
525 filename,
537 filename,
526 p1_tracked,
538 p1_tracked,
527 ):
539 ):
528 """Set a file as tracked in the parent (or not)
540 """Set a file as tracked in the parent (or not)
529
541
530 This is to be called when adjust the dirstate to a new parent after an history
542 This is to be called when adjust the dirstate to a new parent after an history
531 rewriting operation.
543 rewriting operation.
532
544
533 It should not be called during a merge (p2 != nullid) and only within
545 It should not be called during a merge (p2 != nullid) and only within
534 a `with dirstate.parentchange():` context.
546 a `with dirstate.parentchange():` context.
535 """
547 """
536 if self.in_merge:
548 if self.in_merge:
537 msg = b'update_file_reference should not be called when merging'
549 msg = b'update_file_reference should not be called when merging'
538 raise error.ProgrammingError(msg)
550 raise error.ProgrammingError(msg)
539 entry = self._map.get(filename)
551 entry = self._map.get(filename)
540 if entry is None:
552 if entry is None:
541 wc_tracked = False
553 wc_tracked = False
542 else:
554 else:
543 wc_tracked = entry.tracked
555 wc_tracked = entry.tracked
544 possibly_dirty = False
556 possibly_dirty = False
545 if p1_tracked and wc_tracked:
557 if p1_tracked and wc_tracked:
546 # the underlying reference might have changed, we will have to
558 # the underlying reference might have changed, we will have to
547 # check it.
559 # check it.
548 possibly_dirty = True
560 possibly_dirty = True
549 elif not (p1_tracked or wc_tracked):
561 elif not (p1_tracked or wc_tracked):
550 # the file is no longer relevant to anyone
562 # the file is no longer relevant to anyone
551 self._drop(filename)
563 self._drop(filename)
552 elif (not p1_tracked) and wc_tracked:
564 elif (not p1_tracked) and wc_tracked:
553 if entry is not None and entry.added:
565 if entry is not None and entry.added:
554 return # avoid dropping copy information (maybe?)
566 return # avoid dropping copy information (maybe?)
555 elif p1_tracked and not wc_tracked:
567 elif p1_tracked and not wc_tracked:
556 pass
568 pass
557 else:
569 else:
558 assert False, 'unreachable'
570 assert False, 'unreachable'
559
571
560 # this mean we are doing call for file we do not really care about the
572 # this mean we are doing call for file we do not really care about the
561 # data (eg: added or removed), however this should be a minor overhead
573 # data (eg: added or removed), however this should be a minor overhead
562 # compared to the overall update process calling this.
574 # compared to the overall update process calling this.
563 parentfiledata = None
575 parentfiledata = None
564 if wc_tracked:
576 if wc_tracked:
565 parentfiledata = self._get_filedata(filename)
577 parentfiledata = self._get_filedata(filename)
566
578
567 self._updatedfiles.add(filename)
579 self._updatedfiles.add(filename)
568 self._map.reset_state(
580 self._map.reset_state(
569 filename,
581 filename,
570 wc_tracked,
582 wc_tracked,
571 p1_tracked,
583 p1_tracked,
572 possibly_dirty=possibly_dirty,
584 possibly_dirty=possibly_dirty,
573 parentfiledata=parentfiledata,
585 parentfiledata=parentfiledata,
574 )
586 )
575 if (
587 if (
576 parentfiledata is not None
588 parentfiledata is not None
577 and parentfiledata[2] > self._lastnormaltime
589 and parentfiledata[2] > self._lastnormaltime
578 ):
590 ):
579 # Remember the most recent modification timeslot for status(),
591 # Remember the most recent modification timeslot for status(),
580 # to make sure we won't miss future size-preserving file content
592 # to make sure we won't miss future size-preserving file content
581 # modifications that happen within the same timeslot.
593 # modifications that happen within the same timeslot.
582 self._lastnormaltime = parentfiledata[2]
594 self._lastnormaltime = parentfiledata[2]
583
595
584 @requires_parents_change
596 @requires_parents_change
585 def update_file(
597 def update_file(
586 self,
598 self,
587 filename,
599 filename,
588 wc_tracked,
600 wc_tracked,
589 p1_tracked,
601 p1_tracked,
590 p2_tracked=False,
602 p2_tracked=False,
591 merged=False,
603 merged=False,
592 clean_p1=False,
604 clean_p1=False,
593 clean_p2=False,
605 clean_p2=False,
594 possibly_dirty=False,
606 possibly_dirty=False,
595 parentfiledata=None,
607 parentfiledata=None,
596 ):
608 ):
597 """update the information about a file in the dirstate
609 """update the information about a file in the dirstate
598
610
599 This is to be called when the direstates parent changes to keep track
611 This is to be called when the direstates parent changes to keep track
600 of what is the file situation in regards to the working copy and its parent.
612 of what is the file situation in regards to the working copy and its parent.
601
613
602 This function must be called within a `dirstate.parentchange` context.
614 This function must be called within a `dirstate.parentchange` context.
603
615
604 note: the API is at an early stage and we might need to adjust it
616 note: the API is at an early stage and we might need to adjust it
605 depending of what information ends up being relevant and useful to
617 depending of what information ends up being relevant and useful to
606 other processing.
618 other processing.
607 """
619 """
608 if merged and (clean_p1 or clean_p2):
620 if merged and (clean_p1 or clean_p2):
609 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
621 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
610 raise error.ProgrammingError(msg)
622 raise error.ProgrammingError(msg)
611
623
612 # note: I do not think we need to double check name clash here since we
624 # note: I do not think we need to double check name clash here since we
613 # are in a update/merge case that should already have taken care of
625 # are in a update/merge case that should already have taken care of
614 # this. The test agrees
626 # this. The test agrees
615
627
616 self._dirty = True
628 self._dirty = True
617 self._updatedfiles.add(filename)
629 self._updatedfiles.add(filename)
618
630
619 need_parent_file_data = (
631 need_parent_file_data = (
620 not (possibly_dirty or clean_p2 or merged)
632 not (possibly_dirty or clean_p2 or merged)
621 and wc_tracked
633 and wc_tracked
622 and p1_tracked
634 and p1_tracked
623 )
635 )
624
636
625 # this mean we are doing call for file we do not really care about the
637 # this mean we are doing call for file we do not really care about the
626 # data (eg: added or removed), however this should be a minor overhead
638 # data (eg: added or removed), however this should be a minor overhead
627 # compared to the overall update process calling this.
639 # compared to the overall update process calling this.
628 if need_parent_file_data:
640 if need_parent_file_data:
629 if parentfiledata is None:
641 if parentfiledata is None:
630 parentfiledata = self._get_filedata(filename)
642 parentfiledata = self._get_filedata(filename)
631 mtime = parentfiledata[2]
643 mtime = parentfiledata[2]
632
644
633 if mtime > self._lastnormaltime:
645 if mtime > self._lastnormaltime:
634 # Remember the most recent modification timeslot for
646 # Remember the most recent modification timeslot for
635 # status(), to make sure we won't miss future
647 # status(), to make sure we won't miss future
636 # size-preserving file content modifications that happen
648 # size-preserving file content modifications that happen
637 # within the same timeslot.
649 # within the same timeslot.
638 self._lastnormaltime = mtime
650 self._lastnormaltime = mtime
639
651
640 self._map.reset_state(
652 self._map.reset_state(
641 filename,
653 filename,
642 wc_tracked,
654 wc_tracked,
643 p1_tracked,
655 p1_tracked,
644 p2_tracked=p2_tracked,
656 p2_tracked=p2_tracked,
645 merged=merged,
657 merged=merged,
646 clean_p1=clean_p1,
658 clean_p1=clean_p1,
647 clean_p2=clean_p2,
659 clean_p2=clean_p2,
648 possibly_dirty=possibly_dirty,
660 possibly_dirty=possibly_dirty,
649 parentfiledata=parentfiledata,
661 parentfiledata=parentfiledata,
650 )
662 )
651 if (
663 if (
652 parentfiledata is not None
664 parentfiledata is not None
653 and parentfiledata[2] > self._lastnormaltime
665 and parentfiledata[2] > self._lastnormaltime
654 ):
666 ):
655 # Remember the most recent modification timeslot for status(),
667 # Remember the most recent modification timeslot for status(),
656 # to make sure we won't miss future size-preserving file content
668 # to make sure we won't miss future size-preserving file content
657 # modifications that happen within the same timeslot.
669 # modifications that happen within the same timeslot.
658 self._lastnormaltime = parentfiledata[2]
670 self._lastnormaltime = parentfiledata[2]
659
671
660 def _addpath(
672 def _addpath(
661 self,
673 self,
662 f,
674 f,
663 mode=0,
675 mode=0,
664 size=None,
676 size=None,
665 mtime=None,
677 mtime=None,
666 added=False,
678 added=False,
667 merged=False,
679 merged=False,
668 from_p2=False,
680 from_p2=False,
669 possibly_dirty=False,
681 possibly_dirty=False,
670 ):
682 ):
671 entry = self._map.get(f)
683 entry = self._map.get(f)
672 if added or entry is not None and entry.removed:
684 if added or entry is not None and entry.removed:
673 scmutil.checkfilename(f)
685 scmutil.checkfilename(f)
674 if self._map.hastrackeddir(f):
686 if self._map.hastrackeddir(f):
675 msg = _(b'directory %r already in dirstate')
687 msg = _(b'directory %r already in dirstate')
676 msg %= pycompat.bytestr(f)
688 msg %= pycompat.bytestr(f)
677 raise error.Abort(msg)
689 raise error.Abort(msg)
678 # shadows
690 # shadows
679 for d in pathutil.finddirs(f):
691 for d in pathutil.finddirs(f):
680 if self._map.hastrackeddir(d):
692 if self._map.hastrackeddir(d):
681 break
693 break
682 entry = self._map.get(d)
694 entry = self._map.get(d)
683 if entry is not None and not entry.removed:
695 if entry is not None and not entry.removed:
684 msg = _(b'file %r in dirstate clashes with %r')
696 msg = _(b'file %r in dirstate clashes with %r')
685 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
697 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
686 raise error.Abort(msg)
698 raise error.Abort(msg)
687 self._dirty = True
699 self._dirty = True
688 self._updatedfiles.add(f)
700 self._updatedfiles.add(f)
689 self._map.addfile(
701 self._map.addfile(
690 f,
702 f,
691 mode=mode,
703 mode=mode,
692 size=size,
704 size=size,
693 mtime=mtime,
705 mtime=mtime,
694 added=added,
706 added=added,
695 merged=merged,
707 merged=merged,
696 from_p2=from_p2,
708 from_p2=from_p2,
697 possibly_dirty=possibly_dirty,
709 possibly_dirty=possibly_dirty,
698 )
710 )
699
711
700 def _get_filedata(self, filename):
712 def _get_filedata(self, filename):
701 """returns"""
713 """returns"""
702 s = os.lstat(self._join(filename))
714 s = os.lstat(self._join(filename))
703 mode = s.st_mode
715 mode = s.st_mode
704 size = s.st_size
716 size = s.st_size
705 mtime = s[stat.ST_MTIME]
717 mtime = s[stat.ST_MTIME]
706 return (mode, size, mtime)
718 return (mode, size, mtime)
707
719
708 def _normal(self, f, parentfiledata=None):
709 if parentfiledata:
710 (mode, size, mtime) = parentfiledata
711 else:
712 (mode, size, mtime) = self._get_filedata(f)
713 self._addpath(f, mode=mode, size=size, mtime=mtime)
714 self._map.copymap.pop(f, None)
715 if f in self._map.nonnormalset:
716 self._map.nonnormalset.remove(f)
717 if mtime > self._lastnormaltime:
718 # Remember the most recent modification timeslot for status(),
719 # to make sure we won't miss future size-preserving file content
720 # modifications that happen within the same timeslot.
721 self._lastnormaltime = mtime
722
723 def _normallookup(self, f):
720 def _normallookup(self, f):
724 '''Mark a file normal, but possibly dirty.'''
721 '''Mark a file normal, but possibly dirty.'''
725 if self.in_merge:
722 if self.in_merge:
726 # if there is a merge going on and the file was either
723 # if there is a merge going on and the file was either
727 # "merged" or coming from other parent (-2) before
724 # "merged" or coming from other parent (-2) before
728 # being removed, restore that state.
725 # being removed, restore that state.
729 entry = self._map.get(f)
726 entry = self._map.get(f)
730 if entry is not None:
727 if entry is not None:
731 # XXX this should probably be dealt with a a lower level
728 # XXX this should probably be dealt with a a lower level
732 # (see `merged_removed` and `from_p2_removed`)
729 # (see `merged_removed` and `from_p2_removed`)
733 if entry.merged_removed or entry.from_p2_removed:
730 if entry.merged_removed or entry.from_p2_removed:
734 source = self._map.copymap.get(f)
731 source = self._map.copymap.get(f)
735 if entry.merged_removed:
732 if entry.merged_removed:
736 self._otherparent(f)
733 self._otherparent(f)
737 elif entry.from_p2_removed:
734 elif entry.from_p2_removed:
738 self._otherparent(f)
735 self._otherparent(f)
739 if source is not None:
736 if source is not None:
740 self.copy(source, f)
737 self.copy(source, f)
741 return
738 return
742 elif entry.merged or entry.from_p2:
739 elif entry.merged or entry.from_p2:
743 return
740 return
744 self._addpath(f, possibly_dirty=True)
741 self._addpath(f, possibly_dirty=True)
745 self._map.copymap.pop(f, None)
742 self._map.copymap.pop(f, None)
746
743
747 def _otherparent(self, f):
744 def _otherparent(self, f):
748 if not self.in_merge:
745 if not self.in_merge:
749 msg = _(b"setting %r to other parent only allowed in merges") % f
746 msg = _(b"setting %r to other parent only allowed in merges") % f
750 raise error.Abort(msg)
747 raise error.Abort(msg)
751 entry = self._map.get(f)
748 entry = self._map.get(f)
752 if entry is not None and entry.tracked:
749 if entry is not None and entry.tracked:
753 # merge-like
750 # merge-like
754 self._addpath(f, merged=True)
751 self._addpath(f, merged=True)
755 else:
752 else:
756 # add-like
753 # add-like
757 self._addpath(f, from_p2=True)
754 self._addpath(f, from_p2=True)
758 self._map.copymap.pop(f, None)
755 self._map.copymap.pop(f, None)
759
756
760 def _add(self, filename):
757 def _add(self, filename):
761 """internal function to mark a file as added"""
758 """internal function to mark a file as added"""
762 self._addpath(filename, added=True)
759 self._addpath(filename, added=True)
763 self._map.copymap.pop(filename, None)
760 self._map.copymap.pop(filename, None)
764
761
765 def _drop(self, filename):
762 def _drop(self, filename):
766 """internal function to drop a file from the dirstate"""
763 """internal function to drop a file from the dirstate"""
767 if self._map.dropfile(filename):
764 if self._map.dropfile(filename):
768 self._dirty = True
765 self._dirty = True
769 self._updatedfiles.add(filename)
766 self._updatedfiles.add(filename)
770 self._map.copymap.pop(filename, None)
767 self._map.copymap.pop(filename, None)
771
768
772 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
769 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
773 if exists is None:
770 if exists is None:
774 exists = os.path.lexists(os.path.join(self._root, path))
771 exists = os.path.lexists(os.path.join(self._root, path))
775 if not exists:
772 if not exists:
776 # Maybe a path component exists
773 # Maybe a path component exists
777 if not ignoremissing and b'/' in path:
774 if not ignoremissing and b'/' in path:
778 d, f = path.rsplit(b'/', 1)
775 d, f = path.rsplit(b'/', 1)
779 d = self._normalize(d, False, ignoremissing, None)
776 d = self._normalize(d, False, ignoremissing, None)
780 folded = d + b"/" + f
777 folded = d + b"/" + f
781 else:
778 else:
782 # No path components, preserve original case
779 # No path components, preserve original case
783 folded = path
780 folded = path
784 else:
781 else:
785 # recursively normalize leading directory components
782 # recursively normalize leading directory components
786 # against dirstate
783 # against dirstate
787 if b'/' in normed:
784 if b'/' in normed:
788 d, f = normed.rsplit(b'/', 1)
785 d, f = normed.rsplit(b'/', 1)
789 d = self._normalize(d, False, ignoremissing, True)
786 d = self._normalize(d, False, ignoremissing, True)
790 r = self._root + b"/" + d
787 r = self._root + b"/" + d
791 folded = d + b"/" + util.fspath(f, r)
788 folded = d + b"/" + util.fspath(f, r)
792 else:
789 else:
793 folded = util.fspath(normed, self._root)
790 folded = util.fspath(normed, self._root)
794 storemap[normed] = folded
791 storemap[normed] = folded
795
792
796 return folded
793 return folded
797
794
798 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
795 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
799 normed = util.normcase(path)
796 normed = util.normcase(path)
800 folded = self._map.filefoldmap.get(normed, None)
797 folded = self._map.filefoldmap.get(normed, None)
801 if folded is None:
798 if folded is None:
802 if isknown:
799 if isknown:
803 folded = path
800 folded = path
804 else:
801 else:
805 folded = self._discoverpath(
802 folded = self._discoverpath(
806 path, normed, ignoremissing, exists, self._map.filefoldmap
803 path, normed, ignoremissing, exists, self._map.filefoldmap
807 )
804 )
808 return folded
805 return folded
809
806
810 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
807 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
811 normed = util.normcase(path)
808 normed = util.normcase(path)
812 folded = self._map.filefoldmap.get(normed, None)
809 folded = self._map.filefoldmap.get(normed, None)
813 if folded is None:
810 if folded is None:
814 folded = self._map.dirfoldmap.get(normed, None)
811 folded = self._map.dirfoldmap.get(normed, None)
815 if folded is None:
812 if folded is None:
816 if isknown:
813 if isknown:
817 folded = path
814 folded = path
818 else:
815 else:
819 # store discovered result in dirfoldmap so that future
816 # store discovered result in dirfoldmap so that future
820 # normalizefile calls don't start matching directories
817 # normalizefile calls don't start matching directories
821 folded = self._discoverpath(
818 folded = self._discoverpath(
822 path, normed, ignoremissing, exists, self._map.dirfoldmap
819 path, normed, ignoremissing, exists, self._map.dirfoldmap
823 )
820 )
824 return folded
821 return folded
825
822
826 def normalize(self, path, isknown=False, ignoremissing=False):
823 def normalize(self, path, isknown=False, ignoremissing=False):
827 """
824 """
828 normalize the case of a pathname when on a casefolding filesystem
825 normalize the case of a pathname when on a casefolding filesystem
829
826
830 isknown specifies whether the filename came from walking the
827 isknown specifies whether the filename came from walking the
831 disk, to avoid extra filesystem access.
828 disk, to avoid extra filesystem access.
832
829
833 If ignoremissing is True, missing path are returned
830 If ignoremissing is True, missing path are returned
834 unchanged. Otherwise, we try harder to normalize possibly
831 unchanged. Otherwise, we try harder to normalize possibly
835 existing path components.
832 existing path components.
836
833
837 The normalized case is determined based on the following precedence:
834 The normalized case is determined based on the following precedence:
838
835
839 - version of name already stored in the dirstate
836 - version of name already stored in the dirstate
840 - version of name stored on disk
837 - version of name stored on disk
841 - version provided via command arguments
838 - version provided via command arguments
842 """
839 """
843
840
844 if self._checkcase:
841 if self._checkcase:
845 return self._normalize(path, isknown, ignoremissing)
842 return self._normalize(path, isknown, ignoremissing)
846 return path
843 return path
847
844
848 def clear(self):
845 def clear(self):
849 self._map.clear()
846 self._map.clear()
850 self._lastnormaltime = 0
847 self._lastnormaltime = 0
851 self._updatedfiles.clear()
848 self._updatedfiles.clear()
852 self._dirty = True
849 self._dirty = True
853
850
854 def rebuild(self, parent, allfiles, changedfiles=None):
851 def rebuild(self, parent, allfiles, changedfiles=None):
855 if changedfiles is None:
852 if changedfiles is None:
856 # Rebuild entire dirstate
853 # Rebuild entire dirstate
857 to_lookup = allfiles
854 to_lookup = allfiles
858 to_drop = []
855 to_drop = []
859 lastnormaltime = self._lastnormaltime
856 lastnormaltime = self._lastnormaltime
860 self.clear()
857 self.clear()
861 self._lastnormaltime = lastnormaltime
858 self._lastnormaltime = lastnormaltime
862 elif len(changedfiles) < 10:
859 elif len(changedfiles) < 10:
863 # Avoid turning allfiles into a set, which can be expensive if it's
860 # Avoid turning allfiles into a set, which can be expensive if it's
864 # large.
861 # large.
865 to_lookup = []
862 to_lookup = []
866 to_drop = []
863 to_drop = []
867 for f in changedfiles:
864 for f in changedfiles:
868 if f in allfiles:
865 if f in allfiles:
869 to_lookup.append(f)
866 to_lookup.append(f)
870 else:
867 else:
871 to_drop.append(f)
868 to_drop.append(f)
872 else:
869 else:
873 changedfilesset = set(changedfiles)
870 changedfilesset = set(changedfiles)
874 to_lookup = changedfilesset & set(allfiles)
871 to_lookup = changedfilesset & set(allfiles)
875 to_drop = changedfilesset - to_lookup
872 to_drop = changedfilesset - to_lookup
876
873
877 if self._origpl is None:
874 if self._origpl is None:
878 self._origpl = self._pl
875 self._origpl = self._pl
879 self._map.setparents(parent, self._nodeconstants.nullid)
876 self._map.setparents(parent, self._nodeconstants.nullid)
880
877
881 for f in to_lookup:
878 for f in to_lookup:
882 self._normallookup(f)
879 self._normallookup(f)
883 for f in to_drop:
880 for f in to_drop:
884 self._drop(f)
881 self._drop(f)
885
882
886 self._dirty = True
883 self._dirty = True
887
884
888 def identity(self):
885 def identity(self):
889 """Return identity of dirstate itself to detect changing in storage
886 """Return identity of dirstate itself to detect changing in storage
890
887
891 If identity of previous dirstate is equal to this, writing
888 If identity of previous dirstate is equal to this, writing
892 changes based on the former dirstate out can keep consistency.
889 changes based on the former dirstate out can keep consistency.
893 """
890 """
894 return self._map.identity
891 return self._map.identity
895
892
896 def write(self, tr):
893 def write(self, tr):
897 if not self._dirty:
894 if not self._dirty:
898 return
895 return
899
896
900 filename = self._filename
897 filename = self._filename
901 if tr:
898 if tr:
902 # 'dirstate.write()' is not only for writing in-memory
899 # 'dirstate.write()' is not only for writing in-memory
903 # changes out, but also for dropping ambiguous timestamp.
900 # changes out, but also for dropping ambiguous timestamp.
904 # delayed writing re-raise "ambiguous timestamp issue".
901 # delayed writing re-raise "ambiguous timestamp issue".
905 # See also the wiki page below for detail:
902 # See also the wiki page below for detail:
906 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
903 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
907
904
908 # emulate dropping timestamp in 'parsers.pack_dirstate'
905 # emulate dropping timestamp in 'parsers.pack_dirstate'
909 now = _getfsnow(self._opener)
906 now = _getfsnow(self._opener)
910 self._map.clearambiguoustimes(self._updatedfiles, now)
907 self._map.clearambiguoustimes(self._updatedfiles, now)
911
908
912 # emulate that all 'dirstate.normal' results are written out
909 # emulate that all 'dirstate.normal' results are written out
913 self._lastnormaltime = 0
910 self._lastnormaltime = 0
914 self._updatedfiles.clear()
911 self._updatedfiles.clear()
915
912
916 # delay writing in-memory changes out
913 # delay writing in-memory changes out
917 tr.addfilegenerator(
914 tr.addfilegenerator(
918 b'dirstate',
915 b'dirstate',
919 (self._filename,),
916 (self._filename,),
920 lambda f: self._writedirstate(tr, f),
917 lambda f: self._writedirstate(tr, f),
921 location=b'plain',
918 location=b'plain',
922 )
919 )
923 return
920 return
924
921
925 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
922 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
926 self._writedirstate(tr, st)
923 self._writedirstate(tr, st)
927
924
928 def addparentchangecallback(self, category, callback):
925 def addparentchangecallback(self, category, callback):
929 """add a callback to be called when the wd parents are changed
926 """add a callback to be called when the wd parents are changed
930
927
931 Callback will be called with the following arguments:
928 Callback will be called with the following arguments:
932 dirstate, (oldp1, oldp2), (newp1, newp2)
929 dirstate, (oldp1, oldp2), (newp1, newp2)
933
930
934 Category is a unique identifier to allow overwriting an old callback
931 Category is a unique identifier to allow overwriting an old callback
935 with a newer callback.
932 with a newer callback.
936 """
933 """
937 self._plchangecallbacks[category] = callback
934 self._plchangecallbacks[category] = callback
938
935
939 def _writedirstate(self, tr, st):
936 def _writedirstate(self, tr, st):
940 # notify callbacks about parents change
937 # notify callbacks about parents change
941 if self._origpl is not None and self._origpl != self._pl:
938 if self._origpl is not None and self._origpl != self._pl:
942 for c, callback in sorted(
939 for c, callback in sorted(
943 pycompat.iteritems(self._plchangecallbacks)
940 pycompat.iteritems(self._plchangecallbacks)
944 ):
941 ):
945 callback(self, self._origpl, self._pl)
942 callback(self, self._origpl, self._pl)
946 self._origpl = None
943 self._origpl = None
947 # use the modification time of the newly created temporary file as the
944 # use the modification time of the newly created temporary file as the
948 # filesystem's notion of 'now'
945 # filesystem's notion of 'now'
949 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
946 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
950
947
951 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
948 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
952 # timestamp of each entries in dirstate, because of 'now > mtime'
949 # timestamp of each entries in dirstate, because of 'now > mtime'
953 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
950 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
954 if delaywrite > 0:
951 if delaywrite > 0:
955 # do we have any files to delay for?
952 # do we have any files to delay for?
956 for f, e in pycompat.iteritems(self._map):
953 for f, e in pycompat.iteritems(self._map):
957 if e.need_delay(now):
954 if e.need_delay(now):
958 import time # to avoid useless import
955 import time # to avoid useless import
959
956
960 # rather than sleep n seconds, sleep until the next
957 # rather than sleep n seconds, sleep until the next
961 # multiple of n seconds
958 # multiple of n seconds
962 clock = time.time()
959 clock = time.time()
963 start = int(clock) - (int(clock) % delaywrite)
960 start = int(clock) - (int(clock) % delaywrite)
964 end = start + delaywrite
961 end = start + delaywrite
965 time.sleep(end - clock)
962 time.sleep(end - clock)
966 now = end # trust our estimate that the end is near now
963 now = end # trust our estimate that the end is near now
967 break
964 break
968
965
969 self._map.write(tr, st, now)
966 self._map.write(tr, st, now)
970 self._lastnormaltime = 0
967 self._lastnormaltime = 0
971 self._dirty = False
968 self._dirty = False
972
969
973 def _dirignore(self, f):
970 def _dirignore(self, f):
974 if self._ignore(f):
971 if self._ignore(f):
975 return True
972 return True
976 for p in pathutil.finddirs(f):
973 for p in pathutil.finddirs(f):
977 if self._ignore(p):
974 if self._ignore(p):
978 return True
975 return True
979 return False
976 return False
980
977
981 def _ignorefiles(self):
978 def _ignorefiles(self):
982 files = []
979 files = []
983 if os.path.exists(self._join(b'.hgignore')):
980 if os.path.exists(self._join(b'.hgignore')):
984 files.append(self._join(b'.hgignore'))
981 files.append(self._join(b'.hgignore'))
985 for name, path in self._ui.configitems(b"ui"):
982 for name, path in self._ui.configitems(b"ui"):
986 if name == b'ignore' or name.startswith(b'ignore.'):
983 if name == b'ignore' or name.startswith(b'ignore.'):
987 # we need to use os.path.join here rather than self._join
984 # we need to use os.path.join here rather than self._join
988 # because path is arbitrary and user-specified
985 # because path is arbitrary and user-specified
989 files.append(os.path.join(self._rootdir, util.expandpath(path)))
986 files.append(os.path.join(self._rootdir, util.expandpath(path)))
990 return files
987 return files
991
988
992 def _ignorefileandline(self, f):
989 def _ignorefileandline(self, f):
993 files = collections.deque(self._ignorefiles())
990 files = collections.deque(self._ignorefiles())
994 visited = set()
991 visited = set()
995 while files:
992 while files:
996 i = files.popleft()
993 i = files.popleft()
997 patterns = matchmod.readpatternfile(
994 patterns = matchmod.readpatternfile(
998 i, self._ui.warn, sourceinfo=True
995 i, self._ui.warn, sourceinfo=True
999 )
996 )
1000 for pattern, lineno, line in patterns:
997 for pattern, lineno, line in patterns:
1001 kind, p = matchmod._patsplit(pattern, b'glob')
998 kind, p = matchmod._patsplit(pattern, b'glob')
1002 if kind == b"subinclude":
999 if kind == b"subinclude":
1003 if p not in visited:
1000 if p not in visited:
1004 files.append(p)
1001 files.append(p)
1005 continue
1002 continue
1006 m = matchmod.match(
1003 m = matchmod.match(
1007 self._root, b'', [], [pattern], warn=self._ui.warn
1004 self._root, b'', [], [pattern], warn=self._ui.warn
1008 )
1005 )
1009 if m(f):
1006 if m(f):
1010 return (i, lineno, line)
1007 return (i, lineno, line)
1011 visited.add(i)
1008 visited.add(i)
1012 return (None, -1, b"")
1009 return (None, -1, b"")
1013
1010
1014 def _walkexplicit(self, match, subrepos):
1011 def _walkexplicit(self, match, subrepos):
1015 """Get stat data about the files explicitly specified by match.
1012 """Get stat data about the files explicitly specified by match.
1016
1013
1017 Return a triple (results, dirsfound, dirsnotfound).
1014 Return a triple (results, dirsfound, dirsnotfound).
1018 - results is a mapping from filename to stat result. It also contains
1015 - results is a mapping from filename to stat result. It also contains
1019 listings mapping subrepos and .hg to None.
1016 listings mapping subrepos and .hg to None.
1020 - dirsfound is a list of files found to be directories.
1017 - dirsfound is a list of files found to be directories.
1021 - dirsnotfound is a list of files that the dirstate thinks are
1018 - dirsnotfound is a list of files that the dirstate thinks are
1022 directories and that were not found."""
1019 directories and that were not found."""
1023
1020
1024 def badtype(mode):
1021 def badtype(mode):
1025 kind = _(b'unknown')
1022 kind = _(b'unknown')
1026 if stat.S_ISCHR(mode):
1023 if stat.S_ISCHR(mode):
1027 kind = _(b'character device')
1024 kind = _(b'character device')
1028 elif stat.S_ISBLK(mode):
1025 elif stat.S_ISBLK(mode):
1029 kind = _(b'block device')
1026 kind = _(b'block device')
1030 elif stat.S_ISFIFO(mode):
1027 elif stat.S_ISFIFO(mode):
1031 kind = _(b'fifo')
1028 kind = _(b'fifo')
1032 elif stat.S_ISSOCK(mode):
1029 elif stat.S_ISSOCK(mode):
1033 kind = _(b'socket')
1030 kind = _(b'socket')
1034 elif stat.S_ISDIR(mode):
1031 elif stat.S_ISDIR(mode):
1035 kind = _(b'directory')
1032 kind = _(b'directory')
1036 return _(b'unsupported file type (type is %s)') % kind
1033 return _(b'unsupported file type (type is %s)') % kind
1037
1034
1038 badfn = match.bad
1035 badfn = match.bad
1039 dmap = self._map
1036 dmap = self._map
1040 lstat = os.lstat
1037 lstat = os.lstat
1041 getkind = stat.S_IFMT
1038 getkind = stat.S_IFMT
1042 dirkind = stat.S_IFDIR
1039 dirkind = stat.S_IFDIR
1043 regkind = stat.S_IFREG
1040 regkind = stat.S_IFREG
1044 lnkkind = stat.S_IFLNK
1041 lnkkind = stat.S_IFLNK
1045 join = self._join
1042 join = self._join
1046 dirsfound = []
1043 dirsfound = []
1047 foundadd = dirsfound.append
1044 foundadd = dirsfound.append
1048 dirsnotfound = []
1045 dirsnotfound = []
1049 notfoundadd = dirsnotfound.append
1046 notfoundadd = dirsnotfound.append
1050
1047
1051 if not match.isexact() and self._checkcase:
1048 if not match.isexact() and self._checkcase:
1052 normalize = self._normalize
1049 normalize = self._normalize
1053 else:
1050 else:
1054 normalize = None
1051 normalize = None
1055
1052
1056 files = sorted(match.files())
1053 files = sorted(match.files())
1057 subrepos.sort()
1054 subrepos.sort()
1058 i, j = 0, 0
1055 i, j = 0, 0
1059 while i < len(files) and j < len(subrepos):
1056 while i < len(files) and j < len(subrepos):
1060 subpath = subrepos[j] + b"/"
1057 subpath = subrepos[j] + b"/"
1061 if files[i] < subpath:
1058 if files[i] < subpath:
1062 i += 1
1059 i += 1
1063 continue
1060 continue
1064 while i < len(files) and files[i].startswith(subpath):
1061 while i < len(files) and files[i].startswith(subpath):
1065 del files[i]
1062 del files[i]
1066 j += 1
1063 j += 1
1067
1064
1068 if not files or b'' in files:
1065 if not files or b'' in files:
1069 files = [b'']
1066 files = [b'']
1070 # constructing the foldmap is expensive, so don't do it for the
1067 # constructing the foldmap is expensive, so don't do it for the
1071 # common case where files is ['']
1068 # common case where files is ['']
1072 normalize = None
1069 normalize = None
1073 results = dict.fromkeys(subrepos)
1070 results = dict.fromkeys(subrepos)
1074 results[b'.hg'] = None
1071 results[b'.hg'] = None
1075
1072
1076 for ff in files:
1073 for ff in files:
1077 if normalize:
1074 if normalize:
1078 nf = normalize(ff, False, True)
1075 nf = normalize(ff, False, True)
1079 else:
1076 else:
1080 nf = ff
1077 nf = ff
1081 if nf in results:
1078 if nf in results:
1082 continue
1079 continue
1083
1080
1084 try:
1081 try:
1085 st = lstat(join(nf))
1082 st = lstat(join(nf))
1086 kind = getkind(st.st_mode)
1083 kind = getkind(st.st_mode)
1087 if kind == dirkind:
1084 if kind == dirkind:
1088 if nf in dmap:
1085 if nf in dmap:
1089 # file replaced by dir on disk but still in dirstate
1086 # file replaced by dir on disk but still in dirstate
1090 results[nf] = None
1087 results[nf] = None
1091 foundadd((nf, ff))
1088 foundadd((nf, ff))
1092 elif kind == regkind or kind == lnkkind:
1089 elif kind == regkind or kind == lnkkind:
1093 results[nf] = st
1090 results[nf] = st
1094 else:
1091 else:
1095 badfn(ff, badtype(kind))
1092 badfn(ff, badtype(kind))
1096 if nf in dmap:
1093 if nf in dmap:
1097 results[nf] = None
1094 results[nf] = None
1098 except OSError as inst: # nf not found on disk - it is dirstate only
1095 except OSError as inst: # nf not found on disk - it is dirstate only
1099 if nf in dmap: # does it exactly match a missing file?
1096 if nf in dmap: # does it exactly match a missing file?
1100 results[nf] = None
1097 results[nf] = None
1101 else: # does it match a missing directory?
1098 else: # does it match a missing directory?
1102 if self._map.hasdir(nf):
1099 if self._map.hasdir(nf):
1103 notfoundadd(nf)
1100 notfoundadd(nf)
1104 else:
1101 else:
1105 badfn(ff, encoding.strtolocal(inst.strerror))
1102 badfn(ff, encoding.strtolocal(inst.strerror))
1106
1103
1107 # match.files() may contain explicitly-specified paths that shouldn't
1104 # match.files() may contain explicitly-specified paths that shouldn't
1108 # be taken; drop them from the list of files found. dirsfound/notfound
1105 # be taken; drop them from the list of files found. dirsfound/notfound
1109 # aren't filtered here because they will be tested later.
1106 # aren't filtered here because they will be tested later.
1110 if match.anypats():
1107 if match.anypats():
1111 for f in list(results):
1108 for f in list(results):
1112 if f == b'.hg' or f in subrepos:
1109 if f == b'.hg' or f in subrepos:
1113 # keep sentinel to disable further out-of-repo walks
1110 # keep sentinel to disable further out-of-repo walks
1114 continue
1111 continue
1115 if not match(f):
1112 if not match(f):
1116 del results[f]
1113 del results[f]
1117
1114
1118 # Case insensitive filesystems cannot rely on lstat() failing to detect
1115 # Case insensitive filesystems cannot rely on lstat() failing to detect
1119 # a case-only rename. Prune the stat object for any file that does not
1116 # a case-only rename. Prune the stat object for any file that does not
1120 # match the case in the filesystem, if there are multiple files that
1117 # match the case in the filesystem, if there are multiple files that
1121 # normalize to the same path.
1118 # normalize to the same path.
1122 if match.isexact() and self._checkcase:
1119 if match.isexact() and self._checkcase:
1123 normed = {}
1120 normed = {}
1124
1121
1125 for f, st in pycompat.iteritems(results):
1122 for f, st in pycompat.iteritems(results):
1126 if st is None:
1123 if st is None:
1127 continue
1124 continue
1128
1125
1129 nc = util.normcase(f)
1126 nc = util.normcase(f)
1130 paths = normed.get(nc)
1127 paths = normed.get(nc)
1131
1128
1132 if paths is None:
1129 if paths is None:
1133 paths = set()
1130 paths = set()
1134 normed[nc] = paths
1131 normed[nc] = paths
1135
1132
1136 paths.add(f)
1133 paths.add(f)
1137
1134
1138 for norm, paths in pycompat.iteritems(normed):
1135 for norm, paths in pycompat.iteritems(normed):
1139 if len(paths) > 1:
1136 if len(paths) > 1:
1140 for path in paths:
1137 for path in paths:
1141 folded = self._discoverpath(
1138 folded = self._discoverpath(
1142 path, norm, True, None, self._map.dirfoldmap
1139 path, norm, True, None, self._map.dirfoldmap
1143 )
1140 )
1144 if path != folded:
1141 if path != folded:
1145 results[path] = None
1142 results[path] = None
1146
1143
1147 return results, dirsfound, dirsnotfound
1144 return results, dirsfound, dirsnotfound
1148
1145
def walk(self, match, subrepos, unknown, ignored, full=True):
    """
    Walk recursively through the directory tree, finding all files
    matched by match.

    If full is False, maybe skip some known-clean files.

    ``subrepos`` entries are excluded from the result; ``unknown`` and
    ``ignored`` control whether unknown / ignored files are visited at
    all (they select the ignore predicates used below).

    Return a dict mapping filename to stat-like object (either
    mercurial.osutil.stat instance or return value of os.stat()).

    """
    # full is a flag that extensions that hook into walk can use -- this
    # implementation doesn't use it at all. This satisfies the contract
    # because we only guarantee a "maybe".

    # Pick the ignore predicates: listing ignored files disables
    # ignoring entirely; listing neither unknown nor ignored files lets
    # us skip directory recursion (step 2) via always-true predicates.
    if ignored:
        ignore = util.never
        dirignore = util.never
    elif unknown:
        ignore = self._ignore
        dirignore = self._dirignore
    else:
        # if not unknown and not ignored, drop dir recursion and step 2
        ignore = util.always
        dirignore = util.always

    # Hoist frequently used attributes/functions into locals for the
    # hot traversal loop below.
    matchfn = match.matchfn
    matchalways = match.always()
    matchtdir = match.traversedir
    dmap = self._map
    listdir = util.listdir
    lstat = os.lstat
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self._join

    exact = skipstep3 = False
    if match.isexact():  # match.exact
        exact = True
        dirignore = util.always  # skip step 2
    elif match.prefix():  # match.match, no patterns
        skipstep3 = True

    if not exact and self._checkcase:
        normalize = self._normalize
        normalizefile = self._normalizefile
        skipstep3 = False
    else:
        normalize = self._normalize
        normalizefile = None

    # step 1: find all explicit files
    results, work, dirsnotfound = self._walkexplicit(match, subrepos)
    if matchtdir:
        for d in work:
            matchtdir(d[0])
        for d in dirsnotfound:
            matchtdir(d)

    skipstep3 = skipstep3 and not (work or dirsnotfound)
    work = [d for d in work if not dirignore(d[0])]

    # step 2: visit subdirectories
    def traverse(work, alreadynormed):
        # Depth-first traversal using an explicit work stack (pop from
        # the end, append new directories as they are discovered).
        wadd = work.append
        while work:
            tracing.counter('dirstate.walk work', len(work))
            nd = work.pop()
            visitentries = match.visitchildrenset(nd)
            if not visitentries:
                continue
            if visitentries == b'this' or visitentries == b'all':
                visitentries = None
            skip = None
            if nd != b'':
                skip = b'.hg'
            try:
                with tracing.log('dirstate.walk.traverse listdir %s', nd):
                    entries = listdir(join(nd), stat=True, skip=skip)
            except OSError as inst:
                # Unreadable or vanished directory: report via the
                # matcher's bad() callback and keep walking.
                if inst.errno in (errno.EACCES, errno.ENOENT):
                    match.bad(
                        self.pathto(nd), encoding.strtolocal(inst.strerror)
                    )
                    continue
                raise
            for f, kind, st in entries:
                # Some matchers may return files in the visitentries set,
                # instead of 'this', if the matcher explicitly mentions them
                # and is not an exactmatcher. This is acceptable; we do not
                # make any hard assumptions about file-or-directory below
                # based on the presence of `f` in visitentries. If
                # visitchildrenset returned a set, we can always skip the
                # entries *not* in the set it provided regardless of whether
                # they're actually a file or a directory.
                if visitentries and f not in visitentries:
                    continue
                if normalizefile:
                    # even though f might be a directory, we're only
                    # interested in comparing it to files currently in the
                    # dmap -- therefore normalizefile is enough
                    nf = normalizefile(
                        nd and (nd + b"/" + f) or f, True, True
                    )
                else:
                    nf = nd and (nd + b"/" + f) or f
                if nf not in results:
                    if kind == dirkind:
                        if not ignore(nf):
                            if matchtdir:
                                matchtdir(nf)
                            wadd(nf)
                        if nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None
                    elif kind == regkind or kind == lnkkind:
                        if nf in dmap:
                            if matchalways or matchfn(nf):
                                results[nf] = st
                        elif (matchalways or matchfn(nf)) and not ignore(
                            nf
                        ):
                            # unknown file -- normalize if necessary
                            if not alreadynormed:
                                nf = normalize(nf, False, True)
                            results[nf] = st
                    elif nf in dmap and (matchalways or matchfn(nf)):
                        # entry of another kind (e.g. fifo/device) that is
                        # still tracked: report it with no stat info
                        results[nf] = None

    for nd, d in work:
        # alreadynormed means that processwork doesn't have to do any
        # expensive directory normalization
        alreadynormed = not normalize or nd == d
        traverse([d], alreadynormed)

    for s in subrepos:
        del results[s]
    del results[b'.hg']

    # step 3: visit remaining files from dmap
    if not skipstep3 and not exact:
        # If a dmap file is not in results yet, it was either
        # a) not matching matchfn b) ignored, c) missing, or d) under a
        # symlink directory.
        if not results and matchalways:
            visit = [f for f in dmap]
        else:
            visit = [f for f in dmap if f not in results and matchfn(f)]
        visit.sort()

        if unknown:
            # unknown == True means we walked all dirs under the roots
            # that wasn't ignored, and everything that matched was stat'ed
            # and is already in results.
            # The rest must thus be ignored or under a symlink.
            audit_path = pathutil.pathauditor(self._root, cached=True)

            for nf in iter(visit):
                # If a stat for the same file was already added with a
                # different case, don't add one for this, since that would
                # make it appear as if the file exists under both names
                # on disk.
                if (
                    normalizefile
                    and normalizefile(nf, True, True) in results
                ):
                    results[nf] = None
                # Report ignored items in the dmap as long as they are not
                # under a symlink directory.
                elif audit_path.check(nf):
                    try:
                        results[nf] = lstat(join(nf))
                        # file was just ignored, no links, and exists
                    except OSError:
                        # file doesn't exist
                        results[nf] = None
                else:
                    # It's either missing or under a symlink directory
                    # which we in this case report as missing
                    results[nf] = None
        else:
            # We may not have walked the full directory tree above,
            # so stat and check everything we missed.
            iv = iter(visit)
            for st in util.statfiles([join(i) for i in visit]):
                results[next(iv)] = st
    return results
def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
    """Run the Rust status implementation and return ``(lookup, status)``.

    ``lookup`` is the list of files whose state could not be decided
    from metadata alone; ``status`` is a ``scmutil.status`` object.
    May raise ``rustmod.FallbackError`` (handled by the caller) -- TODO
    confirm; the caller in ``status()`` catches it.
    """
    # Force Rayon (Rust parallelism library) to respect the number of
    # workers. This is a temporary workaround until Rust code knows
    # how to read the config file.
    numcpus = self._ui.configint(b"worker", b"numcpus")
    if numcpus is not None:
        encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

    workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
    if not workers_enabled:
        # Force a single worker thread when workers are disabled.
        encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

    # The Rust side returns one flat tuple with all result categories,
    # warnings/bad-file reports, traversed dirs and a dirtiness flag.
    (
        lookup,
        modified,
        added,
        removed,
        deleted,
        clean,
        ignored,
        unknown,
        warnings,
        bad,
        traversed,
        dirty,
    ) = rustmod.status(
        self._map._rustmap,
        matcher,
        self._rootdir,
        self._ignorefiles(),
        self._checkexec,
        self._lastnormaltime,
        bool(list_clean),
        bool(list_ignored),
        bool(list_unknown),
        bool(matcher.traversedir),
    )

    # The Rust walk may itself have updated dirstate entries.
    self._dirty |= dirty

    if matcher.traversedir:
        for dir in traversed:
            matcher.traversedir(dir)

    if self._ui.warn:
        for item in warnings:
            if isinstance(item, tuple):
                # (file_path, syntax): invalid syntax inside an ignore file
                file_path, syntax = item
                msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                    file_path,
                    syntax,
                )
                self._ui.warn(msg)
            else:
                # bare path: an ignore/pattern file that could not be read
                msg = _(b"skipping unreadable pattern file '%s': %s\n")
                self._ui.warn(
                    msg
                    % (
                        pathutil.canonpath(
                            self._rootdir, self._rootdir, item
                        ),
                        b"No such file or directory",
                    )
                )

    for (fn, message) in bad:
        matcher.bad(fn, encoding.strtolocal(message))

    status = scmutil.status(
        modified=modified,
        added=added,
        removed=removed,
        deleted=deleted,
        unknown=unknown,
        ignored=ignored,
        clean=clean,
    )
    return (lookup, status)
def status(self, match, subrepos, ignored, clean, unknown):
    """Determine the status of the working copy relative to the
    dirstate and return a pair of (unsure, status), where status is of type
    scmutil.status and:

      unsure:
        files that might have been modified since the dirstate was
        written, but need to be read to be sure (size is the same
        but mtime differs)
      status.modified:
        files that have definitely been modified since the dirstate
        was written (different size or mode)
      status.clean:
        files that have definitely not been modified since the
        dirstate was written
    """
    # Keep the listing flags under distinct names; the parameter names
    # are reused below as the result accumulator lists.
    listignored, listclean, listunknown = ignored, clean, unknown
    lookup, modified, added, unknown, ignored = [], [], [], [], []
    removed, deleted, clean = [], [], []

    dmap = self._map
    dmap.preload()

    # Prefer the Rust implementation whenever it supports the request;
    # each elif below disables it for a not-yet-implemented case.
    use_rust = True

    allowed_matchers = (
        matchmod.alwaysmatcher,
        matchmod.exactmatcher,
        matchmod.includematcher,
    )

    if rustmod is None:
        use_rust = False
    elif self._checkcase:
        # Case-insensitive filesystems are not handled yet
        use_rust = False
    elif subrepos:
        use_rust = False
    elif sparse.enabled:
        use_rust = False
    elif not isinstance(match, allowed_matchers):
        # Some matchers have yet to be implemented
        use_rust = False

    if use_rust:
        try:
            return self._rust_status(
                match, listclean, listignored, listunknown
            )
        except rustmod.FallbackError:
            # Rust asked to fall back to the Python implementation below.
            pass

    def noop(f):
        # placeholder appender for categories the caller did not request
        pass

    # Bind the append methods (and often-used attributes) to locals:
    # the walk loop below is hot and this avoids repeated lookups.
    dcontains = dmap.__contains__
    dget = dmap.__getitem__
    ladd = lookup.append  # aka "unsure"
    madd = modified.append
    aadd = added.append
    uadd = unknown.append if listunknown else noop
    iadd = ignored.append if listignored else noop
    radd = removed.append
    dadd = deleted.append
    cadd = clean.append if listclean else noop
    mexact = match.exact
    dirignore = self._dirignore
    checkexec = self._checkexec
    copymap = self._map.copymap
    lastnormaltime = self._lastnormaltime

    # We need to do full walks when either
    # - we're listing all clean files, or
    # - match.traversedir does something, because match.traversedir should
    #   be called for every dir in the working dir
    full = listclean or match.traversedir is not None
    for fn, st in pycompat.iteritems(
        self.walk(match, subrepos, listunknown, listignored, full=full)
    ):
        if not dcontains(fn):
            # File exists on disk but is not tracked: classify as
            # ignored or unknown.
            if (listignored or mexact(fn)) and dirignore(fn):
                if listignored:
                    iadd(fn)
            else:
                uadd(fn)
            continue

        # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
        # written like that for performance reasons. dmap[fn] is not a
        # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
        # opcode has fast paths when the value to be unpacked is a tuple or
        # a list, but falls back to creating a full-fledged iterator in
        # general. That is much slower than simply accessing and storing the
        # tuple members one by one.
        t = dget(fn)
        mode = t.mode
        size = t.size
        time = t.mtime

        if not st and t.tracked:
            # tracked but gone from disk
            dadd(fn)
        elif t.merged:
            madd(fn)
        elif t.added:
            aadd(fn)
        elif t.removed:
            radd(fn)
        elif t.tracked:
            if (
                size >= 0
                and (
                    (size != st.st_size and size != st.st_size & _rangemask)
                    or ((mode ^ st.st_mode) & 0o100 and checkexec)
                )
                or t.from_p2
                or fn in copymap
            ):
                if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                    # issue6456: Size returned may be longer due to
                    # encryption on EXT-4 fscrypt, undecided.
                    ladd(fn)
                else:
                    madd(fn)
            elif (
                time != st[stat.ST_MTIME]
                and time != st[stat.ST_MTIME] & _rangemask
            ):
                ladd(fn)
            elif st[stat.ST_MTIME] == lastnormaltime:
                # fn may have just been marked as normal and it may have
                # changed in the same second without changing its size.
                # This can happen if we quickly do multiple commits.
                # Force lookup, so we don't miss such a racy file change.
                ladd(fn)
            elif listclean:
                cadd(fn)
    status = scmutil.status(
        modified, added, removed, deleted, unknown, ignored, clean
    )
    return (lookup, status)
def matches(self, match):
    """Return every dirstate entry (regardless of state) matched by ``match``."""
    # When the Rust extension is loaded, query its map directly.
    entries = self._map._rustmap if rustmod is not None else self._map

    if match.always():
        # Everything matches: hand back the full key view.
        return entries.keys()

    explicit = match.files()
    if match.isexact():
        # Exact matcher: iterate the (typically much smaller) explicit
        # file list instead of the whole map.
        return [name for name in explicit if name in entries]
    if match.prefix() and all(name in entries for name in explicit):
        # Pure prefix matcher whose roots are all known files -- the
        # roots themselves are exactly the answer.
        return list(explicit)
    return [name for name in entries if match(name)]
1578 def _actualfilename(self, tr):
1575 def _actualfilename(self, tr):
1579 if tr:
1576 if tr:
1580 return self._pendingfilename
1577 return self._pendingfilename
1581 else:
1578 else:
1582 return self._filename
1579 return self._filename
1583
1580
def savebackup(self, tr, backupname):
    '''Save current dirstate into backup file

    ``tr`` is the active transaction (may be None); ``backupname`` is
    the file the current dirstate content is copied to.
    '''
    filename = self._actualfilename(tr)
    # Copying a file onto itself would lose data.
    assert backupname != filename

    # use '_writedirstate' instead of 'write' to write changes certainly,
    # because the latter omits writing out if transaction is running.
    # output file will be used to create backup of dirstate at this point.
    if self._dirty or not self._opener.exists(filename):
        self._writedirstate(
            tr,
            self._opener(filename, b"w", atomictemp=True, checkambig=True),
        )

    if tr:
        # ensure that subsequent tr.writepending returns True for
        # changes written out above, even if dirstate is never
        # changed after this
        tr.addfilegenerator(
            b'dirstate',
            (self._filename,),
            lambda f: self._writedirstate(tr, f),
            location=b'plain',
        )

        # ensure that pending file written above is unlinked at
        # failure, even if tr.writepending isn't invoked until the
        # end of this transaction
        tr.registertmp(filename, location=b'plain')

    self._opener.tryunlink(backupname)
    # hardlink backup is okay because _writedirstate is always called
    # with an "atomictemp=True" file.
    util.copyfile(
        self._opener.join(filename),
        self._opener.join(backupname),
        hardlink=True,
    )
def restorebackup(self, tr, backupname):
    """Replace the current dirstate with the content of ``backupname``."""
    # Drop in-memory state first: otherwise "wlock.release()" could
    # write stale changes back out after the file has been restored.
    self.invalidate()
    filename = self._actualfilename(tr)
    opener = self._opener
    if util.samefile(opener.join(backupname), opener.join(filename)):
        # The backup is a hardlink to the live file; dropping the extra
        # name is all that is needed.
        opener.unlink(backupname)
    else:
        opener.rename(backupname, filename, checkambig=True)
def clearbackup(self, tr, backupname):
    """Delete the dirstate backup file ``backupname``.

    ``tr`` is accepted for interface symmetry with savebackup /
    restorebackup and is not used here.
    """
    self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now