##// END OF EJS Templates
dirstate: add dedicated function for updating data of a file...
Pulkit Goyal -
r48412:a9d75262 default
parent child Browse files
Show More
@@ -1,1601 +1,1609 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """A ``filecache`` variant whose entries live under ``.hg/``."""

    def join(self, obj, fname):
        # Resolve fname through the repository's .hg/ opener.
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """A ``filecache`` variant whose entries live in the repository root."""

    def join(self, obj, fname):
        # Resolve fname relative to the working-directory root.
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator: ``func`` may only run inside a ``parentchange`` context.

    Raises error.ProgrammingError when called while no parent change is
    pending on the dirstate.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator: ``func`` must NOT run inside a ``parentchange`` context.

    Raises error.ProgrammingError when called while a parent change is
    pending on the dirstate.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._updatedfiles = set()
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
242 if self._checklink and self._checkexec:
243
243
244 def f(x):
244 def f(x):
245 try:
245 try:
246 st = os.lstat(self._join(x))
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
247 if util.statislink(st):
248 return b'l'
248 return b'l'
249 if util.statisexec(st):
249 if util.statisexec(st):
250 return b'x'
250 return b'x'
251 except OSError:
251 except OSError:
252 pass
252 pass
253 return b''
253 return b''
254
254
255 return f
255 return f
256
256
257 fallback = buildfallback()
257 fallback = buildfallback()
258 if self._checklink:
258 if self._checklink:
259
259
260 def f(x):
260 def f(x):
261 if os.path.islink(self._join(x)):
261 if os.path.islink(self._join(x)):
262 return b'l'
262 return b'l'
263 if b'x' in fallback(x):
263 if b'x' in fallback(x):
264 return b'x'
264 return b'x'
265 return b''
265 return b''
266
266
267 return f
267 return f
268 if self._checkexec:
268 if self._checkexec:
269
269
270 def f(x):
270 def f(x):
271 if b'l' in fallback(x):
271 if b'l' in fallback(x):
272 return b'l'
272 return b'l'
273 if util.isexec(self._join(x)):
273 if util.isexec(self._join(x)):
274 return b'x'
274 return b'x'
275 return b''
275 return b''
276
276
277 return f
277 return f
278 else:
278 else:
279 return fallback
279 return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries a
370 When moving from two parents to one, "merged" entries a
371 adjusted to normal and previous copy records discarded and
371 adjusted to normal and previous copy records discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 self._map.setparents(p1, p2)
388 self._map.setparents(p1, p2)
389 copies = {}
389 copies = {}
390 if (
390 if (
391 oldp2 != self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
395
396 for f in candidatefiles:
396 for f in candidatefiles:
397 s = self._map.get(f)
397 s = self._map.get(f)
398 if s is None:
398 if s is None:
399 continue
399 continue
400
400
401 # Discard "merged" markers when moving away from a merge state
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
402 if s.merged:
403 source = self._map.copymap.get(f)
403 source = self._map.copymap.get(f)
404 if source:
404 if source:
405 copies[f] = source
405 copies[f] = source
406 self.normallookup(f)
406 self.normallookup(f)
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._add(f)
413 return copies
413 return copies
414
414
415 def setbranch(self, branch):
415 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 try:
418 try:
419 f.write(self._branch + b'\n')
419 f.write(self._branch + b'\n')
420 f.close()
420 f.close()
421
421
422 # make sure filecache has the correct stat info for _branch after
422 # make sure filecache has the correct stat info for _branch after
423 # replacing the underlying file
423 # replacing the underlying file
424 ce = self._filecache[b'_branch']
424 ce = self._filecache[b'_branch']
425 if ce:
425 if ce:
426 ce.refresh()
426 ce.refresh()
427 except: # re-raises
427 except: # re-raises
428 f.discard()
428 f.discard()
429 raise
429 raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of "update/merge" case. For
469 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True the file was previously untracked, False otherwise.
472 return True the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self.normallookup(filename)
479 self.normallookup(filename)
480 return True
480 return True
481 return False
481 return False
482
482
483 @requires_no_parents_change
483 @requires_no_parents_change
484 def set_untracked(self, filename):
484 def set_untracked(self, filename):
485 """a "public" method for generic code to mark a file as untracked
485 """a "public" method for generic code to mark a file as untracked
486
486
487 This function is to be called outside of "update/merge" case. For
487 This function is to be called outside of "update/merge" case. For
488 example by a command like `hg remove X`.
488 example by a command like `hg remove X`.
489
489
490 return True the file was previously tracked, False otherwise.
490 return True the file was previously tracked, False otherwise.
491 """
491 """
492 entry = self._map.get(filename)
492 entry = self._map.get(filename)
493 if entry is None:
493 if entry is None:
494 return False
494 return False
495 elif entry.added:
495 elif entry.added:
496 self._drop(filename)
496 self._drop(filename)
497 return True
497 return True
498 else:
498 else:
499 self._remove(filename)
499 self._remove(filename)
500 return True
500 return True
501
501
502 @requires_parents_change
502 @requires_parents_change
503 def update_file_reference(
503 def update_file_reference(
504 self,
504 self,
505 filename,
505 filename,
506 p1_tracked,
506 p1_tracked,
507 ):
507 ):
508 """Set a file as tracked in the parent (or not)
508 """Set a file as tracked in the parent (or not)
509
509
510 This is to be called when adjust the dirstate to a new parent after an history
510 This is to be called when adjust the dirstate to a new parent after an history
511 rewriting operation.
511 rewriting operation.
512
512
513 It should not be called during a merge (p2 != nullid) and only within
513 It should not be called during a merge (p2 != nullid) and only within
514 a `with dirstate.parentchange():` context.
514 a `with dirstate.parentchange():` context.
515 """
515 """
516 if self.in_merge:
516 if self.in_merge:
517 msg = b'update_file_reference should not be called when merging'
517 msg = b'update_file_reference should not be called when merging'
518 raise error.ProgrammingError(msg)
518 raise error.ProgrammingError(msg)
519 entry = self._map.get(filename)
519 entry = self._map.get(filename)
520 if entry is None:
520 if entry is None:
521 wc_tracked = False
521 wc_tracked = False
522 else:
522 else:
523 wc_tracked = entry.tracked
523 wc_tracked = entry.tracked
524 if p1_tracked and wc_tracked:
524 if p1_tracked and wc_tracked:
525 # the underlying reference might have changed, we will have to
525 # the underlying reference might have changed, we will have to
526 # check it.
526 # check it.
527 self.normallookup(filename)
527 self.normallookup(filename)
528 elif not (p1_tracked or wc_tracked):
528 elif not (p1_tracked or wc_tracked):
529 # the file is no longer relevant to anyone
529 # the file is no longer relevant to anyone
530 self._drop(filename)
530 self._drop(filename)
531 elif (not p1_tracked) and wc_tracked:
531 elif (not p1_tracked) and wc_tracked:
532 if not entry.added:
532 if not entry.added:
533 self._add(filename)
533 self._add(filename)
534 elif p1_tracked and not wc_tracked:
534 elif p1_tracked and not wc_tracked:
535 if entry is None or not entry.removed:
535 if entry is None or not entry.removed:
536 self._remove(filename)
536 self._remove(filename)
537 else:
537 else:
538 assert False, 'unreachable'
538 assert False, 'unreachable'
539
539
540 @requires_parents_change
540 @requires_parents_change
541 def update_file(
541 def update_file(
542 self,
542 self,
543 filename,
543 filename,
544 wc_tracked,
544 wc_tracked,
545 p1_tracked,
545 p1_tracked,
546 p2_tracked=False,
546 p2_tracked=False,
547 merged=False,
547 merged=False,
548 clean_p1=False,
548 clean_p1=False,
549 clean_p2=False,
549 clean_p2=False,
550 possibly_dirty=False,
550 possibly_dirty=False,
551 ):
551 ):
552 """update the information about a file in the dirstate
552 """update the information about a file in the dirstate
553
553
554 This is to be called when the direstates parent changes to keep track
554 This is to be called when the direstates parent changes to keep track
555 of what is the file situation in regards to the working copy and its parent.
555 of what is the file situation in regards to the working copy and its parent.
556
556
557 This function must be called within a `dirstate.parentchange` context.
557 This function must be called within a `dirstate.parentchange` context.
558
558
559 note: the API is at an early stage and we might need to ajust it
559 note: the API is at an early stage and we might need to ajust it
560 depending of what information ends up being relevant and useful to
560 depending of what information ends up being relevant and useful to
561 other processing.
561 other processing.
562 """
562 """
563 if not self.pendingparentchange():
563 if not self.pendingparentchange():
564 msg = b'calling `update_file` outside of a parentchange context'
564 msg = b'calling `update_file` outside of a parentchange context'
565 raise error.ProgrammingError(msg)
565 raise error.ProgrammingError(msg)
566 if merged and (clean_p1 or clean_p2):
566 if merged and (clean_p1 or clean_p2):
567 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
567 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
568 raise error.ProgrammingError(msg)
568 raise error.ProgrammingError(msg)
569 assert not (merged and (clean_p1 or clean_p1))
569 assert not (merged and (clean_p1 or clean_p1))
570 if not (p1_tracked or p2_tracked or wc_tracked):
570 if not (p1_tracked or p2_tracked or wc_tracked):
571 self._drop(filename)
571 self._drop(filename)
572 elif merged:
572 elif merged:
573 assert wc_tracked
573 assert wc_tracked
574 if not self.in_merge:
574 if not self.in_merge:
575 self.normallookup(filename)
575 self.normallookup(filename)
576 self.otherparent(filename)
576 self.otherparent(filename)
577 elif not (p1_tracked or p2_tracked) and wc_tracked:
577 elif not (p1_tracked or p2_tracked) and wc_tracked:
578 self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
578 self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
579 self._map.copymap.pop(filename, None)
579 self._map.copymap.pop(filename, None)
580 elif (p1_tracked or p2_tracked) and not wc_tracked:
580 elif (p1_tracked or p2_tracked) and not wc_tracked:
581 self._remove(filename)
581 self._remove(filename)
582 elif clean_p2 and wc_tracked:
582 elif clean_p2 and wc_tracked:
583 assert p2_tracked
583 assert p2_tracked
584 self.otherparent(filename)
584 self.otherparent(filename)
585 elif not p1_tracked and p2_tracked and wc_tracked:
585 elif not p1_tracked and p2_tracked and wc_tracked:
586 self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
586 self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
587 self._map.copymap.pop(filename, None)
587 self._map.copymap.pop(filename, None)
588 elif possibly_dirty:
588 elif possibly_dirty:
589 self._addpath(filename, possibly_dirty=possibly_dirty)
589 self._addpath(filename, possibly_dirty=possibly_dirty)
590 elif wc_tracked:
590 elif wc_tracked:
591 self.normal(filename)
591 self.normal(filename)
592 # XXX We need something for file that are dirty after an update
592 # XXX We need something for file that are dirty after an update
593 else:
593 else:
594 assert False, 'unreachable'
594 assert False, 'unreachable'
595
595
596 @requires_parents_change
597 def update_parent_file_data(self, f, filedata):
598 """update the information about the content of a file
599
600 This function should be called within a `dirstate.parentchange` context.
601 """
602 self.normal(f, parentfiledata=filedata)
603
596 def _addpath(
604 def _addpath(
597 self,
605 self,
598 f,
606 f,
599 mode=0,
607 mode=0,
600 size=None,
608 size=None,
601 mtime=None,
609 mtime=None,
602 added=False,
610 added=False,
603 merged=False,
611 merged=False,
604 from_p2=False,
612 from_p2=False,
605 possibly_dirty=False,
613 possibly_dirty=False,
606 ):
614 ):
607 entry = self._map.get(f)
615 entry = self._map.get(f)
608 if added or entry is not None and entry.removed:
616 if added or entry is not None and entry.removed:
609 scmutil.checkfilename(f)
617 scmutil.checkfilename(f)
610 if self._map.hastrackeddir(f):
618 if self._map.hastrackeddir(f):
611 msg = _(b'directory %r already in dirstate')
619 msg = _(b'directory %r already in dirstate')
612 msg %= pycompat.bytestr(f)
620 msg %= pycompat.bytestr(f)
613 raise error.Abort(msg)
621 raise error.Abort(msg)
614 # shadows
622 # shadows
615 for d in pathutil.finddirs(f):
623 for d in pathutil.finddirs(f):
616 if self._map.hastrackeddir(d):
624 if self._map.hastrackeddir(d):
617 break
625 break
618 entry = self._map.get(d)
626 entry = self._map.get(d)
619 if entry is not None and not entry.removed:
627 if entry is not None and not entry.removed:
620 msg = _(b'file %r in dirstate clashes with %r')
628 msg = _(b'file %r in dirstate clashes with %r')
621 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
629 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
622 raise error.Abort(msg)
630 raise error.Abort(msg)
623 self._dirty = True
631 self._dirty = True
624 self._updatedfiles.add(f)
632 self._updatedfiles.add(f)
625 self._map.addfile(
633 self._map.addfile(
626 f,
634 f,
627 mode=mode,
635 mode=mode,
628 size=size,
636 size=size,
629 mtime=mtime,
637 mtime=mtime,
630 added=added,
638 added=added,
631 merged=merged,
639 merged=merged,
632 from_p2=from_p2,
640 from_p2=from_p2,
633 possibly_dirty=possibly_dirty,
641 possibly_dirty=possibly_dirty,
634 )
642 )
635
643
636 def normal(self, f, parentfiledata=None):
644 def normal(self, f, parentfiledata=None):
637 """Mark a file normal and clean.
645 """Mark a file normal and clean.
638
646
639 parentfiledata: (mode, size, mtime) of the clean file
647 parentfiledata: (mode, size, mtime) of the clean file
640
648
641 parentfiledata should be computed from memory (for mode,
649 parentfiledata should be computed from memory (for mode,
642 size), as or close as possible from the point where we
650 size), as or close as possible from the point where we
643 determined the file was clean, to limit the risk of the
651 determined the file was clean, to limit the risk of the
644 file having been changed by an external process between the
652 file having been changed by an external process between the
645 moment where the file was determined to be clean and now."""
653 moment where the file was determined to be clean and now."""
646 if parentfiledata:
654 if parentfiledata:
647 (mode, size, mtime) = parentfiledata
655 (mode, size, mtime) = parentfiledata
648 else:
656 else:
649 s = os.lstat(self._join(f))
657 s = os.lstat(self._join(f))
650 mode = s.st_mode
658 mode = s.st_mode
651 size = s.st_size
659 size = s.st_size
652 mtime = s[stat.ST_MTIME]
660 mtime = s[stat.ST_MTIME]
653 self._addpath(f, mode=mode, size=size, mtime=mtime)
661 self._addpath(f, mode=mode, size=size, mtime=mtime)
654 self._map.copymap.pop(f, None)
662 self._map.copymap.pop(f, None)
655 if f in self._map.nonnormalset:
663 if f in self._map.nonnormalset:
656 self._map.nonnormalset.remove(f)
664 self._map.nonnormalset.remove(f)
657 if mtime > self._lastnormaltime:
665 if mtime > self._lastnormaltime:
658 # Remember the most recent modification timeslot for status(),
666 # Remember the most recent modification timeslot for status(),
659 # to make sure we won't miss future size-preserving file content
667 # to make sure we won't miss future size-preserving file content
660 # modifications that happen within the same timeslot.
668 # modifications that happen within the same timeslot.
661 self._lastnormaltime = mtime
669 self._lastnormaltime = mtime
662
670
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    # grab the copy source before calling merge()/otherparent()
                    # below: both end up popping the copymap entry for `f`
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    if source is not None:
                        # re-record the copy that was dropped above
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in the desired merge/other-parent state
                    return
        # default path: track the file but force a content check on the
        # next status run (possibly_dirty), and forget any copy record
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
687 def otherparent(self, f):
695 def otherparent(self, f):
688 '''Mark as coming from the other parent, always dirty.'''
696 '''Mark as coming from the other parent, always dirty.'''
689 if not self.in_merge:
697 if not self.in_merge:
690 msg = _(b"setting %r to other parent only allowed in merges") % f
698 msg = _(b"setting %r to other parent only allowed in merges") % f
691 raise error.Abort(msg)
699 raise error.Abort(msg)
692 entry = self._map.get(f)
700 entry = self._map.get(f)
693 if entry is not None and entry.tracked:
701 if entry is not None and entry.tracked:
694 # merge-like
702 # merge-like
695 self._addpath(f, merged=True)
703 self._addpath(f, merged=True)
696 else:
704 else:
697 # add-like
705 # add-like
698 self._addpath(f, from_p2=True)
706 self._addpath(f, from_p2=True)
699 self._map.copymap.pop(f, None)
707 self._map.copymap.pop(f, None)
700
708
701 def add(self, f):
709 def add(self, f):
702 '''Mark a file added.'''
710 '''Mark a file added.'''
703 self._add(f)
711 self._add(f)
704
712
705 def _add(self, filename):
713 def _add(self, filename):
706 """internal function to mark a file as added"""
714 """internal function to mark a file as added"""
707 self._addpath(filename, added=True)
715 self._addpath(filename, added=True)
708 self._map.copymap.pop(filename, None)
716 self._map.copymap.pop(filename, None)
709
717
710 def remove(self, f):
718 def remove(self, f):
711 '''Mark a file removed'''
719 '''Mark a file removed'''
712 self._remove(f)
720 self._remove(f)
713
721
714 def _remove(self, filename):
722 def _remove(self, filename):
715 """internal function to mark a file removed"""
723 """internal function to mark a file removed"""
716 self._dirty = True
724 self._dirty = True
717 self._updatedfiles.add(filename)
725 self._updatedfiles.add(filename)
718 self._map.removefile(filename, in_merge=self.in_merge)
726 self._map.removefile(filename, in_merge=self.in_merge)
719
727
720 def merge(self, f):
728 def merge(self, f):
721 '''Mark a file merged.'''
729 '''Mark a file merged.'''
722 if not self.in_merge:
730 if not self.in_merge:
723 return self.normallookup(f)
731 return self.normallookup(f)
724 return self.otherparent(f)
732 return self.otherparent(f)
725
733
726 def drop(self, f):
734 def drop(self, f):
727 '''Drop a file from the dirstate'''
735 '''Drop a file from the dirstate'''
728 self._drop(f)
736 self._drop(f)
729
737
730 def _drop(self, filename):
738 def _drop(self, filename):
731 """internal function to drop a file from the dirstate"""
739 """internal function to drop a file from the dirstate"""
732 if self._map.dropfile(filename):
740 if self._map.dropfile(filename):
733 self._dirty = True
741 self._dirty = True
734 self._updatedfiles.add(filename)
742 self._updatedfiles.add(filename)
735 self._map.copymap.pop(filename, None)
743 self._map.copymap.pop(filename, None)
736
744
737 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
745 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
738 if exists is None:
746 if exists is None:
739 exists = os.path.lexists(os.path.join(self._root, path))
747 exists = os.path.lexists(os.path.join(self._root, path))
740 if not exists:
748 if not exists:
741 # Maybe a path component exists
749 # Maybe a path component exists
742 if not ignoremissing and b'/' in path:
750 if not ignoremissing and b'/' in path:
743 d, f = path.rsplit(b'/', 1)
751 d, f = path.rsplit(b'/', 1)
744 d = self._normalize(d, False, ignoremissing, None)
752 d = self._normalize(d, False, ignoremissing, None)
745 folded = d + b"/" + f
753 folded = d + b"/" + f
746 else:
754 else:
747 # No path components, preserve original case
755 # No path components, preserve original case
748 folded = path
756 folded = path
749 else:
757 else:
750 # recursively normalize leading directory components
758 # recursively normalize leading directory components
751 # against dirstate
759 # against dirstate
752 if b'/' in normed:
760 if b'/' in normed:
753 d, f = normed.rsplit(b'/', 1)
761 d, f = normed.rsplit(b'/', 1)
754 d = self._normalize(d, False, ignoremissing, True)
762 d = self._normalize(d, False, ignoremissing, True)
755 r = self._root + b"/" + d
763 r = self._root + b"/" + d
756 folded = d + b"/" + util.fspath(f, r)
764 folded = d + b"/" + util.fspath(f, r)
757 else:
765 else:
758 folded = util.fspath(normed, self._root)
766 folded = util.fspath(normed, self._root)
759 storemap[normed] = folded
767 storemap[normed] = folded
760
768
761 return folded
769 return folded
762
770
763 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
771 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
764 normed = util.normcase(path)
772 normed = util.normcase(path)
765 folded = self._map.filefoldmap.get(normed, None)
773 folded = self._map.filefoldmap.get(normed, None)
766 if folded is None:
774 if folded is None:
767 if isknown:
775 if isknown:
768 folded = path
776 folded = path
769 else:
777 else:
770 folded = self._discoverpath(
778 folded = self._discoverpath(
771 path, normed, ignoremissing, exists, self._map.filefoldmap
779 path, normed, ignoremissing, exists, self._map.filefoldmap
772 )
780 )
773 return folded
781 return folded
774
782
775 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
783 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
776 normed = util.normcase(path)
784 normed = util.normcase(path)
777 folded = self._map.filefoldmap.get(normed, None)
785 folded = self._map.filefoldmap.get(normed, None)
778 if folded is None:
786 if folded is None:
779 folded = self._map.dirfoldmap.get(normed, None)
787 folded = self._map.dirfoldmap.get(normed, None)
780 if folded is None:
788 if folded is None:
781 if isknown:
789 if isknown:
782 folded = path
790 folded = path
783 else:
791 else:
784 # store discovered result in dirfoldmap so that future
792 # store discovered result in dirfoldmap so that future
785 # normalizefile calls don't start matching directories
793 # normalizefile calls don't start matching directories
786 folded = self._discoverpath(
794 folded = self._discoverpath(
787 path, normed, ignoremissing, exists, self._map.dirfoldmap
795 path, normed, ignoremissing, exists, self._map.dirfoldmap
788 )
796 )
789 return folded
797 return folded
790
798
791 def normalize(self, path, isknown=False, ignoremissing=False):
799 def normalize(self, path, isknown=False, ignoremissing=False):
792 """
800 """
793 normalize the case of a pathname when on a casefolding filesystem
801 normalize the case of a pathname when on a casefolding filesystem
794
802
795 isknown specifies whether the filename came from walking the
803 isknown specifies whether the filename came from walking the
796 disk, to avoid extra filesystem access.
804 disk, to avoid extra filesystem access.
797
805
798 If ignoremissing is True, missing path are returned
806 If ignoremissing is True, missing path are returned
799 unchanged. Otherwise, we try harder to normalize possibly
807 unchanged. Otherwise, we try harder to normalize possibly
800 existing path components.
808 existing path components.
801
809
802 The normalized case is determined based on the following precedence:
810 The normalized case is determined based on the following precedence:
803
811
804 - version of name already stored in the dirstate
812 - version of name already stored in the dirstate
805 - version of name stored on disk
813 - version of name stored on disk
806 - version provided via command arguments
814 - version provided via command arguments
807 """
815 """
808
816
809 if self._checkcase:
817 if self._checkcase:
810 return self._normalize(path, isknown, ignoremissing)
818 return self._normalize(path, isknown, ignoremissing)
811 return path
819 return path
812
820
813 def clear(self):
821 def clear(self):
814 self._map.clear()
822 self._map.clear()
815 self._lastnormaltime = 0
823 self._lastnormaltime = 0
816 self._updatedfiles.clear()
824 self._updatedfiles.clear()
817 self._dirty = True
825 self._dirty = True
818
826
819 def rebuild(self, parent, allfiles, changedfiles=None):
827 def rebuild(self, parent, allfiles, changedfiles=None):
820 if changedfiles is None:
828 if changedfiles is None:
821 # Rebuild entire dirstate
829 # Rebuild entire dirstate
822 to_lookup = allfiles
830 to_lookup = allfiles
823 to_drop = []
831 to_drop = []
824 lastnormaltime = self._lastnormaltime
832 lastnormaltime = self._lastnormaltime
825 self.clear()
833 self.clear()
826 self._lastnormaltime = lastnormaltime
834 self._lastnormaltime = lastnormaltime
827 elif len(changedfiles) < 10:
835 elif len(changedfiles) < 10:
828 # Avoid turning allfiles into a set, which can be expensive if it's
836 # Avoid turning allfiles into a set, which can be expensive if it's
829 # large.
837 # large.
830 to_lookup = []
838 to_lookup = []
831 to_drop = []
839 to_drop = []
832 for f in changedfiles:
840 for f in changedfiles:
833 if f in allfiles:
841 if f in allfiles:
834 to_lookup.append(f)
842 to_lookup.append(f)
835 else:
843 else:
836 to_drop.append(f)
844 to_drop.append(f)
837 else:
845 else:
838 changedfilesset = set(changedfiles)
846 changedfilesset = set(changedfiles)
839 to_lookup = changedfilesset & set(allfiles)
847 to_lookup = changedfilesset & set(allfiles)
840 to_drop = changedfilesset - to_lookup
848 to_drop = changedfilesset - to_lookup
841
849
842 if self._origpl is None:
850 if self._origpl is None:
843 self._origpl = self._pl
851 self._origpl = self._pl
844 self._map.setparents(parent, self._nodeconstants.nullid)
852 self._map.setparents(parent, self._nodeconstants.nullid)
845
853
846 for f in to_lookup:
854 for f in to_lookup:
847 self.normallookup(f)
855 self.normallookup(f)
848 for f in to_drop:
856 for f in to_drop:
849 self._drop(f)
857 self._drop(f)
850
858
851 self._dirty = True
859 self._dirty = True
852
860
853 def identity(self):
861 def identity(self):
854 """Return identity of dirstate itself to detect changing in storage
862 """Return identity of dirstate itself to detect changing in storage
855
863
856 If identity of previous dirstate is equal to this, writing
864 If identity of previous dirstate is equal to this, writing
857 changes based on the former dirstate out can keep consistency.
865 changes based on the former dirstate out can keep consistency.
858 """
866 """
859 return self._map.identity
867 return self._map.identity
860
868
    def write(self, tr):
        """Flush the in-memory dirstate, or schedule the flush on `tr`.

        With a transaction, the actual write is registered as a file
        generator and happens when the transaction closes; without one,
        the dirstate file is rewritten immediately.
        """
        if not self._dirty:
            # nothing changed in memory: skip the write entirely
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        # no transaction: write synchronously; atomictemp guards against
        # partial writes and checkambig against timestamp races
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
893 def addparentchangecallback(self, category, callback):
901 def addparentchangecallback(self, category, callback):
894 """add a callback to be called when the wd parents are changed
902 """add a callback to be called when the wd parents are changed
895
903
896 Callback will be called with the following arguments:
904 Callback will be called with the following arguments:
897 dirstate, (oldp1, oldp2), (newp1, newp2)
905 dirstate, (oldp1, oldp2), (newp1, newp2)
898
906
899 Category is a unique identifier to allow overwriting an old callback
907 Category is a unique identifier to allow overwriting an old callback
900 with a newer callback.
908 with a newer callback.
901 """
909 """
902 self._plchangecallbacks[category] = callback
910 self._plchangecallbacks[category] = callback
903
911
    def _writedirstate(self, st):
        """Serialize the dirstate map into the open file object `st`.

        Also fires parent-change callbacks and, when the
        debug.dirstate.delaywrite knob is set, sleeps so that no entry's
        mtime equals the write time (see the need_delay check below).
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
938 def _dirignore(self, f):
946 def _dirignore(self, f):
939 if self._ignore(f):
947 if self._ignore(f):
940 return True
948 return True
941 for p in pathutil.finddirs(f):
949 for p in pathutil.finddirs(f):
942 if self._ignore(p):
950 if self._ignore(p):
943 return True
951 return True
944 return False
952 return False
945
953
946 def _ignorefiles(self):
954 def _ignorefiles(self):
947 files = []
955 files = []
948 if os.path.exists(self._join(b'.hgignore')):
956 if os.path.exists(self._join(b'.hgignore')):
949 files.append(self._join(b'.hgignore'))
957 files.append(self._join(b'.hgignore'))
950 for name, path in self._ui.configitems(b"ui"):
958 for name, path in self._ui.configitems(b"ui"):
951 if name == b'ignore' or name.startswith(b'ignore.'):
959 if name == b'ignore' or name.startswith(b'ignore.'):
952 # we need to use os.path.join here rather than self._join
960 # we need to use os.path.join here rather than self._join
953 # because path is arbitrary and user-specified
961 # because path is arbitrary and user-specified
954 files.append(os.path.join(self._rootdir, util.expandpath(path)))
962 files.append(os.path.join(self._rootdir, util.expandpath(path)))
955 return files
963 return files
956
964
957 def _ignorefileandline(self, f):
965 def _ignorefileandline(self, f):
958 files = collections.deque(self._ignorefiles())
966 files = collections.deque(self._ignorefiles())
959 visited = set()
967 visited = set()
960 while files:
968 while files:
961 i = files.popleft()
969 i = files.popleft()
962 patterns = matchmod.readpatternfile(
970 patterns = matchmod.readpatternfile(
963 i, self._ui.warn, sourceinfo=True
971 i, self._ui.warn, sourceinfo=True
964 )
972 )
965 for pattern, lineno, line in patterns:
973 for pattern, lineno, line in patterns:
966 kind, p = matchmod._patsplit(pattern, b'glob')
974 kind, p = matchmod._patsplit(pattern, b'glob')
967 if kind == b"subinclude":
975 if kind == b"subinclude":
968 if p not in visited:
976 if p not in visited:
969 files.append(p)
977 files.append(p)
970 continue
978 continue
971 m = matchmod.match(
979 m = matchmod.match(
972 self._root, b'', [], [pattern], warn=self._ui.warn
980 self._root, b'', [], [pattern], warn=self._ui.warn
973 )
981 )
974 if m(f):
982 if m(f):
975 return (i, lineno, line)
983 return (i, lineno, line)
976 visited.add(i)
984 visited.add(i)
977 return (None, -1, b"")
985 return (None, -1, b"")
978
986
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # translate an on-disk file type into a human-readable error
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently-used lookups to locals for the loop below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo: both lists are
        # sorted, so a single merge-style pass finds them
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group the stat'ed files by their case-normalized form
            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            # for ambiguous groups, keep only the spelling matching the disk
            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1114 def walk(self, match, subrepos, unknown, ignored, full=True):
1122 def walk(self, match, subrepos, unknown, ignored, full=True):
1115 """
1123 """
1116 Walk recursively through the directory tree, finding all files
1124 Walk recursively through the directory tree, finding all files
1117 matched by match.
1125 matched by match.
1118
1126
1119 If full is False, maybe skip some known-clean files.
1127 If full is False, maybe skip some known-clean files.
1120
1128
1121 Return a dict mapping filename to stat-like object (either
1129 Return a dict mapping filename to stat-like object (either
1122 mercurial.osutil.stat instance or return value of os.stat()).
1130 mercurial.osutil.stat instance or return value of os.stat()).
1123
1131
1124 """
1132 """
1125 # full is a flag that extensions that hook into walk can use -- this
1133 # full is a flag that extensions that hook into walk can use -- this
1126 # implementation doesn't use it at all. This satisfies the contract
1134 # implementation doesn't use it at all. This satisfies the contract
1127 # because we only guarantee a "maybe".
1135 # because we only guarantee a "maybe".
1128
1136
1129 if ignored:
1137 if ignored:
1130 ignore = util.never
1138 ignore = util.never
1131 dirignore = util.never
1139 dirignore = util.never
1132 elif unknown:
1140 elif unknown:
1133 ignore = self._ignore
1141 ignore = self._ignore
1134 dirignore = self._dirignore
1142 dirignore = self._dirignore
1135 else:
1143 else:
1136 # if not unknown and not ignored, drop dir recursion and step 2
1144 # if not unknown and not ignored, drop dir recursion and step 2
1137 ignore = util.always
1145 ignore = util.always
1138 dirignore = util.always
1146 dirignore = util.always
1139
1147
1140 matchfn = match.matchfn
1148 matchfn = match.matchfn
1141 matchalways = match.always()
1149 matchalways = match.always()
1142 matchtdir = match.traversedir
1150 matchtdir = match.traversedir
1143 dmap = self._map
1151 dmap = self._map
1144 listdir = util.listdir
1152 listdir = util.listdir
1145 lstat = os.lstat
1153 lstat = os.lstat
1146 dirkind = stat.S_IFDIR
1154 dirkind = stat.S_IFDIR
1147 regkind = stat.S_IFREG
1155 regkind = stat.S_IFREG
1148 lnkkind = stat.S_IFLNK
1156 lnkkind = stat.S_IFLNK
1149 join = self._join
1157 join = self._join
1150
1158
1151 exact = skipstep3 = False
1159 exact = skipstep3 = False
1152 if match.isexact(): # match.exact
1160 if match.isexact(): # match.exact
1153 exact = True
1161 exact = True
1154 dirignore = util.always # skip step 2
1162 dirignore = util.always # skip step 2
1155 elif match.prefix(): # match.match, no patterns
1163 elif match.prefix(): # match.match, no patterns
1156 skipstep3 = True
1164 skipstep3 = True
1157
1165
1158 if not exact and self._checkcase:
1166 if not exact and self._checkcase:
1159 normalize = self._normalize
1167 normalize = self._normalize
1160 normalizefile = self._normalizefile
1168 normalizefile = self._normalizefile
1161 skipstep3 = False
1169 skipstep3 = False
1162 else:
1170 else:
1163 normalize = self._normalize
1171 normalize = self._normalize
1164 normalizefile = None
1172 normalizefile = None
1165
1173
1166 # step 1: find all explicit files
1174 # step 1: find all explicit files
1167 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1175 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1168 if matchtdir:
1176 if matchtdir:
1169 for d in work:
1177 for d in work:
1170 matchtdir(d[0])
1178 matchtdir(d[0])
1171 for d in dirsnotfound:
1179 for d in dirsnotfound:
1172 matchtdir(d)
1180 matchtdir(d)
1173
1181
1174 skipstep3 = skipstep3 and not (work or dirsnotfound)
1182 skipstep3 = skipstep3 and not (work or dirsnotfound)
1175 work = [d for d in work if not dirignore(d[0])]
1183 work = [d for d in work if not dirignore(d[0])]
1176
1184
1177 # step 2: visit subdirectories
1185 # step 2: visit subdirectories
1178 def traverse(work, alreadynormed):
1186 def traverse(work, alreadynormed):
1179 wadd = work.append
1187 wadd = work.append
1180 while work:
1188 while work:
1181 tracing.counter('dirstate.walk work', len(work))
1189 tracing.counter('dirstate.walk work', len(work))
1182 nd = work.pop()
1190 nd = work.pop()
1183 visitentries = match.visitchildrenset(nd)
1191 visitentries = match.visitchildrenset(nd)
1184 if not visitentries:
1192 if not visitentries:
1185 continue
1193 continue
1186 if visitentries == b'this' or visitentries == b'all':
1194 if visitentries == b'this' or visitentries == b'all':
1187 visitentries = None
1195 visitentries = None
1188 skip = None
1196 skip = None
1189 if nd != b'':
1197 if nd != b'':
1190 skip = b'.hg'
1198 skip = b'.hg'
1191 try:
1199 try:
1192 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1200 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1193 entries = listdir(join(nd), stat=True, skip=skip)
1201 entries = listdir(join(nd), stat=True, skip=skip)
1194 except OSError as inst:
1202 except OSError as inst:
1195 if inst.errno in (errno.EACCES, errno.ENOENT):
1203 if inst.errno in (errno.EACCES, errno.ENOENT):
1196 match.bad(
1204 match.bad(
1197 self.pathto(nd), encoding.strtolocal(inst.strerror)
1205 self.pathto(nd), encoding.strtolocal(inst.strerror)
1198 )
1206 )
1199 continue
1207 continue
1200 raise
1208 raise
1201 for f, kind, st in entries:
1209 for f, kind, st in entries:
1202 # Some matchers may return files in the visitentries set,
1210 # Some matchers may return files in the visitentries set,
1203 # instead of 'this', if the matcher explicitly mentions them
1211 # instead of 'this', if the matcher explicitly mentions them
1204 # and is not an exactmatcher. This is acceptable; we do not
1212 # and is not an exactmatcher. This is acceptable; we do not
1205 # make any hard assumptions about file-or-directory below
1213 # make any hard assumptions about file-or-directory below
1206 # based on the presence of `f` in visitentries. If
1214 # based on the presence of `f` in visitentries. If
1207 # visitchildrenset returned a set, we can always skip the
1215 # visitchildrenset returned a set, we can always skip the
1208 # entries *not* in the set it provided regardless of whether
1216 # entries *not* in the set it provided regardless of whether
1209 # they're actually a file or a directory.
1217 # they're actually a file or a directory.
1210 if visitentries and f not in visitentries:
1218 if visitentries and f not in visitentries:
1211 continue
1219 continue
1212 if normalizefile:
1220 if normalizefile:
1213 # even though f might be a directory, we're only
1221 # even though f might be a directory, we're only
1214 # interested in comparing it to files currently in the
1222 # interested in comparing it to files currently in the
1215 # dmap -- therefore normalizefile is enough
1223 # dmap -- therefore normalizefile is enough
1216 nf = normalizefile(
1224 nf = normalizefile(
1217 nd and (nd + b"/" + f) or f, True, True
1225 nd and (nd + b"/" + f) or f, True, True
1218 )
1226 )
1219 else:
1227 else:
1220 nf = nd and (nd + b"/" + f) or f
1228 nf = nd and (nd + b"/" + f) or f
1221 if nf not in results:
1229 if nf not in results:
1222 if kind == dirkind:
1230 if kind == dirkind:
1223 if not ignore(nf):
1231 if not ignore(nf):
1224 if matchtdir:
1232 if matchtdir:
1225 matchtdir(nf)
1233 matchtdir(nf)
1226 wadd(nf)
1234 wadd(nf)
1227 if nf in dmap and (matchalways or matchfn(nf)):
1235 if nf in dmap and (matchalways or matchfn(nf)):
1228 results[nf] = None
1236 results[nf] = None
1229 elif kind == regkind or kind == lnkkind:
1237 elif kind == regkind or kind == lnkkind:
1230 if nf in dmap:
1238 if nf in dmap:
1231 if matchalways or matchfn(nf):
1239 if matchalways or matchfn(nf):
1232 results[nf] = st
1240 results[nf] = st
1233 elif (matchalways or matchfn(nf)) and not ignore(
1241 elif (matchalways or matchfn(nf)) and not ignore(
1234 nf
1242 nf
1235 ):
1243 ):
1236 # unknown file -- normalize if necessary
1244 # unknown file -- normalize if necessary
1237 if not alreadynormed:
1245 if not alreadynormed:
1238 nf = normalize(nf, False, True)
1246 nf = normalize(nf, False, True)
1239 results[nf] = st
1247 results[nf] = st
1240 elif nf in dmap and (matchalways or matchfn(nf)):
1248 elif nf in dmap and (matchalways or matchfn(nf)):
1241 results[nf] = None
1249 results[nf] = None
1242
1250
1243 for nd, d in work:
1251 for nd, d in work:
1244 # alreadynormed means that processwork doesn't have to do any
1252 # alreadynormed means that processwork doesn't have to do any
1245 # expensive directory normalization
1253 # expensive directory normalization
1246 alreadynormed = not normalize or nd == d
1254 alreadynormed = not normalize or nd == d
1247 traverse([d], alreadynormed)
1255 traverse([d], alreadynormed)
1248
1256
1249 for s in subrepos:
1257 for s in subrepos:
1250 del results[s]
1258 del results[s]
1251 del results[b'.hg']
1259 del results[b'.hg']
1252
1260
1253 # step 3: visit remaining files from dmap
1261 # step 3: visit remaining files from dmap
1254 if not skipstep3 and not exact:
1262 if not skipstep3 and not exact:
1255 # If a dmap file is not in results yet, it was either
1263 # If a dmap file is not in results yet, it was either
1256 # a) not matching matchfn b) ignored, c) missing, or d) under a
1264 # a) not matching matchfn b) ignored, c) missing, or d) under a
1257 # symlink directory.
1265 # symlink directory.
1258 if not results and matchalways:
1266 if not results and matchalways:
1259 visit = [f for f in dmap]
1267 visit = [f for f in dmap]
1260 else:
1268 else:
1261 visit = [f for f in dmap if f not in results and matchfn(f)]
1269 visit = [f for f in dmap if f not in results and matchfn(f)]
1262 visit.sort()
1270 visit.sort()
1263
1271
1264 if unknown:
1272 if unknown:
1265 # unknown == True means we walked all dirs under the roots
1273 # unknown == True means we walked all dirs under the roots
1266 # that wasn't ignored, and everything that matched was stat'ed
1274 # that wasn't ignored, and everything that matched was stat'ed
1267 # and is already in results.
1275 # and is already in results.
1268 # The rest must thus be ignored or under a symlink.
1276 # The rest must thus be ignored or under a symlink.
1269 audit_path = pathutil.pathauditor(self._root, cached=True)
1277 audit_path = pathutil.pathauditor(self._root, cached=True)
1270
1278
1271 for nf in iter(visit):
1279 for nf in iter(visit):
1272 # If a stat for the same file was already added with a
1280 # If a stat for the same file was already added with a
1273 # different case, don't add one for this, since that would
1281 # different case, don't add one for this, since that would
1274 # make it appear as if the file exists under both names
1282 # make it appear as if the file exists under both names
1275 # on disk.
1283 # on disk.
1276 if (
1284 if (
1277 normalizefile
1285 normalizefile
1278 and normalizefile(nf, True, True) in results
1286 and normalizefile(nf, True, True) in results
1279 ):
1287 ):
1280 results[nf] = None
1288 results[nf] = None
1281 # Report ignored items in the dmap as long as they are not
1289 # Report ignored items in the dmap as long as they are not
1282 # under a symlink directory.
1290 # under a symlink directory.
1283 elif audit_path.check(nf):
1291 elif audit_path.check(nf):
1284 try:
1292 try:
1285 results[nf] = lstat(join(nf))
1293 results[nf] = lstat(join(nf))
1286 # file was just ignored, no links, and exists
1294 # file was just ignored, no links, and exists
1287 except OSError:
1295 except OSError:
1288 # file doesn't exist
1296 # file doesn't exist
1289 results[nf] = None
1297 results[nf] = None
1290 else:
1298 else:
1291 # It's either missing or under a symlink directory
1299 # It's either missing or under a symlink directory
1292 # which we in this case report as missing
1300 # which we in this case report as missing
1293 results[nf] = None
1301 results[nf] = None
1294 else:
1302 else:
1295 # We may not have walked the full directory tree above,
1303 # We may not have walked the full directory tree above,
1296 # so stat and check everything we missed.
1304 # so stat and check everything we missed.
1297 iv = iter(visit)
1305 iv = iter(visit)
1298 for st in util.statfiles([join(i) for i in visit]):
1306 for st in util.statfiles([join(i) for i in visit]):
1299 results[next(iv)] = st
1307 results[next(iv)] = st
1300 return results
1308 return results
1301
1309
1302 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1310 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1303 # Force Rayon (Rust parallelism library) to respect the number of
1311 # Force Rayon (Rust parallelism library) to respect the number of
1304 # workers. This is a temporary workaround until Rust code knows
1312 # workers. This is a temporary workaround until Rust code knows
1305 # how to read the config file.
1313 # how to read the config file.
1306 numcpus = self._ui.configint(b"worker", b"numcpus")
1314 numcpus = self._ui.configint(b"worker", b"numcpus")
1307 if numcpus is not None:
1315 if numcpus is not None:
1308 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1316 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1309
1317
1310 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1318 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1311 if not workers_enabled:
1319 if not workers_enabled:
1312 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1320 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1313
1321
1314 (
1322 (
1315 lookup,
1323 lookup,
1316 modified,
1324 modified,
1317 added,
1325 added,
1318 removed,
1326 removed,
1319 deleted,
1327 deleted,
1320 clean,
1328 clean,
1321 ignored,
1329 ignored,
1322 unknown,
1330 unknown,
1323 warnings,
1331 warnings,
1324 bad,
1332 bad,
1325 traversed,
1333 traversed,
1326 dirty,
1334 dirty,
1327 ) = rustmod.status(
1335 ) = rustmod.status(
1328 self._map._rustmap,
1336 self._map._rustmap,
1329 matcher,
1337 matcher,
1330 self._rootdir,
1338 self._rootdir,
1331 self._ignorefiles(),
1339 self._ignorefiles(),
1332 self._checkexec,
1340 self._checkexec,
1333 self._lastnormaltime,
1341 self._lastnormaltime,
1334 bool(list_clean),
1342 bool(list_clean),
1335 bool(list_ignored),
1343 bool(list_ignored),
1336 bool(list_unknown),
1344 bool(list_unknown),
1337 bool(matcher.traversedir),
1345 bool(matcher.traversedir),
1338 )
1346 )
1339
1347
1340 self._dirty |= dirty
1348 self._dirty |= dirty
1341
1349
1342 if matcher.traversedir:
1350 if matcher.traversedir:
1343 for dir in traversed:
1351 for dir in traversed:
1344 matcher.traversedir(dir)
1352 matcher.traversedir(dir)
1345
1353
1346 if self._ui.warn:
1354 if self._ui.warn:
1347 for item in warnings:
1355 for item in warnings:
1348 if isinstance(item, tuple):
1356 if isinstance(item, tuple):
1349 file_path, syntax = item
1357 file_path, syntax = item
1350 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1358 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1351 file_path,
1359 file_path,
1352 syntax,
1360 syntax,
1353 )
1361 )
1354 self._ui.warn(msg)
1362 self._ui.warn(msg)
1355 else:
1363 else:
1356 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1364 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1357 self._ui.warn(
1365 self._ui.warn(
1358 msg
1366 msg
1359 % (
1367 % (
1360 pathutil.canonpath(
1368 pathutil.canonpath(
1361 self._rootdir, self._rootdir, item
1369 self._rootdir, self._rootdir, item
1362 ),
1370 ),
1363 b"No such file or directory",
1371 b"No such file or directory",
1364 )
1372 )
1365 )
1373 )
1366
1374
1367 for (fn, message) in bad:
1375 for (fn, message) in bad:
1368 matcher.bad(fn, encoding.strtolocal(message))
1376 matcher.bad(fn, encoding.strtolocal(message))
1369
1377
1370 status = scmutil.status(
1378 status = scmutil.status(
1371 modified=modified,
1379 modified=modified,
1372 added=added,
1380 added=added,
1373 removed=removed,
1381 removed=removed,
1374 deleted=deleted,
1382 deleted=deleted,
1375 unknown=unknown,
1383 unknown=unknown,
1376 ignored=ignored,
1384 ignored=ignored,
1377 clean=clean,
1385 clean=clean,
1378 )
1386 )
1379 return (lookup, status)
1387 return (lookup, status)
1380
1388
1381 def status(self, match, subrepos, ignored, clean, unknown):
1389 def status(self, match, subrepos, ignored, clean, unknown):
1382 """Determine the status of the working copy relative to the
1390 """Determine the status of the working copy relative to the
1383 dirstate and return a pair of (unsure, status), where status is of type
1391 dirstate and return a pair of (unsure, status), where status is of type
1384 scmutil.status and:
1392 scmutil.status and:
1385
1393
1386 unsure:
1394 unsure:
1387 files that might have been modified since the dirstate was
1395 files that might have been modified since the dirstate was
1388 written, but need to be read to be sure (size is the same
1396 written, but need to be read to be sure (size is the same
1389 but mtime differs)
1397 but mtime differs)
1390 status.modified:
1398 status.modified:
1391 files that have definitely been modified since the dirstate
1399 files that have definitely been modified since the dirstate
1392 was written (different size or mode)
1400 was written (different size or mode)
1393 status.clean:
1401 status.clean:
1394 files that have definitely not been modified since the
1402 files that have definitely not been modified since the
1395 dirstate was written
1403 dirstate was written
1396 """
1404 """
1397 listignored, listclean, listunknown = ignored, clean, unknown
1405 listignored, listclean, listunknown = ignored, clean, unknown
1398 lookup, modified, added, unknown, ignored = [], [], [], [], []
1406 lookup, modified, added, unknown, ignored = [], [], [], [], []
1399 removed, deleted, clean = [], [], []
1407 removed, deleted, clean = [], [], []
1400
1408
1401 dmap = self._map
1409 dmap = self._map
1402 dmap.preload()
1410 dmap.preload()
1403
1411
1404 use_rust = True
1412 use_rust = True
1405
1413
1406 allowed_matchers = (
1414 allowed_matchers = (
1407 matchmod.alwaysmatcher,
1415 matchmod.alwaysmatcher,
1408 matchmod.exactmatcher,
1416 matchmod.exactmatcher,
1409 matchmod.includematcher,
1417 matchmod.includematcher,
1410 )
1418 )
1411
1419
1412 if rustmod is None:
1420 if rustmod is None:
1413 use_rust = False
1421 use_rust = False
1414 elif self._checkcase:
1422 elif self._checkcase:
1415 # Case-insensitive filesystems are not handled yet
1423 # Case-insensitive filesystems are not handled yet
1416 use_rust = False
1424 use_rust = False
1417 elif subrepos:
1425 elif subrepos:
1418 use_rust = False
1426 use_rust = False
1419 elif sparse.enabled:
1427 elif sparse.enabled:
1420 use_rust = False
1428 use_rust = False
1421 elif not isinstance(match, allowed_matchers):
1429 elif not isinstance(match, allowed_matchers):
1422 # Some matchers have yet to be implemented
1430 # Some matchers have yet to be implemented
1423 use_rust = False
1431 use_rust = False
1424
1432
1425 if use_rust:
1433 if use_rust:
1426 try:
1434 try:
1427 return self._rust_status(
1435 return self._rust_status(
1428 match, listclean, listignored, listunknown
1436 match, listclean, listignored, listunknown
1429 )
1437 )
1430 except rustmod.FallbackError:
1438 except rustmod.FallbackError:
1431 pass
1439 pass
1432
1440
1433 def noop(f):
1441 def noop(f):
1434 pass
1442 pass
1435
1443
1436 dcontains = dmap.__contains__
1444 dcontains = dmap.__contains__
1437 dget = dmap.__getitem__
1445 dget = dmap.__getitem__
1438 ladd = lookup.append # aka "unsure"
1446 ladd = lookup.append # aka "unsure"
1439 madd = modified.append
1447 madd = modified.append
1440 aadd = added.append
1448 aadd = added.append
1441 uadd = unknown.append if listunknown else noop
1449 uadd = unknown.append if listunknown else noop
1442 iadd = ignored.append if listignored else noop
1450 iadd = ignored.append if listignored else noop
1443 radd = removed.append
1451 radd = removed.append
1444 dadd = deleted.append
1452 dadd = deleted.append
1445 cadd = clean.append if listclean else noop
1453 cadd = clean.append if listclean else noop
1446 mexact = match.exact
1454 mexact = match.exact
1447 dirignore = self._dirignore
1455 dirignore = self._dirignore
1448 checkexec = self._checkexec
1456 checkexec = self._checkexec
1449 copymap = self._map.copymap
1457 copymap = self._map.copymap
1450 lastnormaltime = self._lastnormaltime
1458 lastnormaltime = self._lastnormaltime
1451
1459
1452 # We need to do full walks when either
1460 # We need to do full walks when either
1453 # - we're listing all clean files, or
1461 # - we're listing all clean files, or
1454 # - match.traversedir does something, because match.traversedir should
1462 # - match.traversedir does something, because match.traversedir should
1455 # be called for every dir in the working dir
1463 # be called for every dir in the working dir
1456 full = listclean or match.traversedir is not None
1464 full = listclean or match.traversedir is not None
1457 for fn, st in pycompat.iteritems(
1465 for fn, st in pycompat.iteritems(
1458 self.walk(match, subrepos, listunknown, listignored, full=full)
1466 self.walk(match, subrepos, listunknown, listignored, full=full)
1459 ):
1467 ):
1460 if not dcontains(fn):
1468 if not dcontains(fn):
1461 if (listignored or mexact(fn)) and dirignore(fn):
1469 if (listignored or mexact(fn)) and dirignore(fn):
1462 if listignored:
1470 if listignored:
1463 iadd(fn)
1471 iadd(fn)
1464 else:
1472 else:
1465 uadd(fn)
1473 uadd(fn)
1466 continue
1474 continue
1467
1475
1468 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1476 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1469 # written like that for performance reasons. dmap[fn] is not a
1477 # written like that for performance reasons. dmap[fn] is not a
1470 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1478 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1471 # opcode has fast paths when the value to be unpacked is a tuple or
1479 # opcode has fast paths when the value to be unpacked is a tuple or
1472 # a list, but falls back to creating a full-fledged iterator in
1480 # a list, but falls back to creating a full-fledged iterator in
1473 # general. That is much slower than simply accessing and storing the
1481 # general. That is much slower than simply accessing and storing the
1474 # tuple members one by one.
1482 # tuple members one by one.
1475 t = dget(fn)
1483 t = dget(fn)
1476 mode = t.mode
1484 mode = t.mode
1477 size = t.size
1485 size = t.size
1478 time = t.mtime
1486 time = t.mtime
1479
1487
1480 if not st and t.tracked:
1488 if not st and t.tracked:
1481 dadd(fn)
1489 dadd(fn)
1482 elif t.merged:
1490 elif t.merged:
1483 madd(fn)
1491 madd(fn)
1484 elif t.added:
1492 elif t.added:
1485 aadd(fn)
1493 aadd(fn)
1486 elif t.removed:
1494 elif t.removed:
1487 radd(fn)
1495 radd(fn)
1488 elif t.tracked:
1496 elif t.tracked:
1489 if (
1497 if (
1490 size >= 0
1498 size >= 0
1491 and (
1499 and (
1492 (size != st.st_size and size != st.st_size & _rangemask)
1500 (size != st.st_size and size != st.st_size & _rangemask)
1493 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1501 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1494 )
1502 )
1495 or t.from_p2
1503 or t.from_p2
1496 or fn in copymap
1504 or fn in copymap
1497 ):
1505 ):
1498 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1506 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1499 # issue6456: Size returned may be longer due to
1507 # issue6456: Size returned may be longer due to
1500 # encryption on EXT-4 fscrypt, undecided.
1508 # encryption on EXT-4 fscrypt, undecided.
1501 ladd(fn)
1509 ladd(fn)
1502 else:
1510 else:
1503 madd(fn)
1511 madd(fn)
1504 elif (
1512 elif (
1505 time != st[stat.ST_MTIME]
1513 time != st[stat.ST_MTIME]
1506 and time != st[stat.ST_MTIME] & _rangemask
1514 and time != st[stat.ST_MTIME] & _rangemask
1507 ):
1515 ):
1508 ladd(fn)
1516 ladd(fn)
1509 elif st[stat.ST_MTIME] == lastnormaltime:
1517 elif st[stat.ST_MTIME] == lastnormaltime:
1510 # fn may have just been marked as normal and it may have
1518 # fn may have just been marked as normal and it may have
1511 # changed in the same second without changing its size.
1519 # changed in the same second without changing its size.
1512 # This can happen if we quickly do multiple commits.
1520 # This can happen if we quickly do multiple commits.
1513 # Force lookup, so we don't miss such a racy file change.
1521 # Force lookup, so we don't miss such a racy file change.
1514 ladd(fn)
1522 ladd(fn)
1515 elif listclean:
1523 elif listclean:
1516 cadd(fn)
1524 cadd(fn)
1517 status = scmutil.status(
1525 status = scmutil.status(
1518 modified, added, removed, deleted, unknown, ignored, clean
1526 modified, added, removed, deleted, unknown, ignored, clean
1519 )
1527 )
1520 return (lookup, status)
1528 return (lookup, status)
1521
1529
1522 def matches(self, match):
1530 def matches(self, match):
1523 """
1531 """
1524 return files in the dirstate (in whatever state) filtered by match
1532 return files in the dirstate (in whatever state) filtered by match
1525 """
1533 """
1526 dmap = self._map
1534 dmap = self._map
1527 if rustmod is not None:
1535 if rustmod is not None:
1528 dmap = self._map._rustmap
1536 dmap = self._map._rustmap
1529
1537
1530 if match.always():
1538 if match.always():
1531 return dmap.keys()
1539 return dmap.keys()
1532 files = match.files()
1540 files = match.files()
1533 if match.isexact():
1541 if match.isexact():
1534 # fast path -- filter the other way around, since typically files is
1542 # fast path -- filter the other way around, since typically files is
1535 # much smaller than dmap
1543 # much smaller than dmap
1536 return [f for f in files if f in dmap]
1544 return [f for f in files if f in dmap]
1537 if match.prefix() and all(fn in dmap for fn in files):
1545 if match.prefix() and all(fn in dmap for fn in files):
1538 # fast path -- all the values are known to be files, so just return
1546 # fast path -- all the values are known to be files, so just return
1539 # that
1547 # that
1540 return list(files)
1548 return list(files)
1541 return [f for f in dmap if match(f)]
1549 return [f for f in dmap if match(f)]
1542
1550
1543 def _actualfilename(self, tr):
1551 def _actualfilename(self, tr):
1544 if tr:
1552 if tr:
1545 return self._pendingfilename
1553 return self._pendingfilename
1546 else:
1554 else:
1547 return self._filename
1555 return self._filename
1548
1556
1549 def savebackup(self, tr, backupname):
1557 def savebackup(self, tr, backupname):
1550 '''Save current dirstate into backup file'''
1558 '''Save current dirstate into backup file'''
1551 filename = self._actualfilename(tr)
1559 filename = self._actualfilename(tr)
1552 assert backupname != filename
1560 assert backupname != filename
1553
1561
1554 # use '_writedirstate' instead of 'write' to write changes certainly,
1562 # use '_writedirstate' instead of 'write' to write changes certainly,
1555 # because the latter omits writing out if transaction is running.
1563 # because the latter omits writing out if transaction is running.
1556 # output file will be used to create backup of dirstate at this point.
1564 # output file will be used to create backup of dirstate at this point.
1557 if self._dirty or not self._opener.exists(filename):
1565 if self._dirty or not self._opener.exists(filename):
1558 self._writedirstate(
1566 self._writedirstate(
1559 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1567 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1560 )
1568 )
1561
1569
1562 if tr:
1570 if tr:
1563 # ensure that subsequent tr.writepending returns True for
1571 # ensure that subsequent tr.writepending returns True for
1564 # changes written out above, even if dirstate is never
1572 # changes written out above, even if dirstate is never
1565 # changed after this
1573 # changed after this
1566 tr.addfilegenerator(
1574 tr.addfilegenerator(
1567 b'dirstate',
1575 b'dirstate',
1568 (self._filename,),
1576 (self._filename,),
1569 self._writedirstate,
1577 self._writedirstate,
1570 location=b'plain',
1578 location=b'plain',
1571 )
1579 )
1572
1580
1573 # ensure that pending file written above is unlinked at
1581 # ensure that pending file written above is unlinked at
1574 # failure, even if tr.writepending isn't invoked until the
1582 # failure, even if tr.writepending isn't invoked until the
1575 # end of this transaction
1583 # end of this transaction
1576 tr.registertmp(filename, location=b'plain')
1584 tr.registertmp(filename, location=b'plain')
1577
1585
1578 self._opener.tryunlink(backupname)
1586 self._opener.tryunlink(backupname)
1579 # hardlink backup is okay because _writedirstate is always called
1587 # hardlink backup is okay because _writedirstate is always called
1580 # with an "atomictemp=True" file.
1588 # with an "atomictemp=True" file.
1581 util.copyfile(
1589 util.copyfile(
1582 self._opener.join(filename),
1590 self._opener.join(filename),
1583 self._opener.join(backupname),
1591 self._opener.join(backupname),
1584 hardlink=True,
1592 hardlink=True,
1585 )
1593 )
1586
1594
1587 def restorebackup(self, tr, backupname):
1595 def restorebackup(self, tr, backupname):
1588 '''Restore dirstate by backup file'''
1596 '''Restore dirstate by backup file'''
1589 # this "invalidate()" prevents "wlock.release()" from writing
1597 # this "invalidate()" prevents "wlock.release()" from writing
1590 # changes of dirstate out after restoring from backup file
1598 # changes of dirstate out after restoring from backup file
1591 self.invalidate()
1599 self.invalidate()
1592 filename = self._actualfilename(tr)
1600 filename = self._actualfilename(tr)
1593 o = self._opener
1601 o = self._opener
1594 if util.samefile(o.join(backupname), o.join(filename)):
1602 if util.samefile(o.join(backupname), o.join(filename)):
1595 o.unlink(backupname)
1603 o.unlink(backupname)
1596 else:
1604 else:
1597 o.rename(backupname, filename, checkambig=True)
1605 o.rename(backupname, filename, checkambig=True)
1598
1606
1599 def clearbackup(self, tr, backupname):
1607 def clearbackup(self, tr, backupname):
1600 '''Clear backup file'''
1608 '''Clear backup file'''
1601 self._opener.unlink(backupname)
1609 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now