##// END OF EJS Templates
dirstate: fix restoration of "merged" state after a remove...
marmoute -
r48803:87b3010c default
parent child Browse files
Show More
@@ -1,1613 +1,1616
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
from __future__ import absolute_import

import collections
import contextlib
import errno
import functools
import os
import stat

from .i18n import _
from .pycompat import delattr

from hgdemandimport import tracing

from . import (
    dirstatemap,
    encoding,
    error,
    match as matchmod,
    pathutil,
    policy,
    pycompat,
    scmutil,
    sparse,
    util,
)

from .interfaces import (
    dirstate as intdirstate,
    util as interfaceutil,
)
38
38
# Accelerated implementations: ``importmod``/``importrust`` fall back to the
# pure-Python versions when the compiled extensions are unavailable.
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# dirstate-v2 support requires the Rust extension (see the check below)
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# short local aliases for frequently used helpers
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

# one entry per tracked file in the dirstate map
DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """filecache for files in .hg/

    Specializes ``scmutil.filecache`` so that cached file names are
    resolved through the dirstate's ``_opener`` (i.e. relative to the
    repository's ``.hg`` directory).
    """

    def join(self, obj, fname):
        # `obj` is the dirstate instance that owns the cached property
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """filecache for files in the repository root

    Specializes ``scmutil.filecache`` so that cached file names are
    resolved relative to the working-directory root (via ``_join``).
    """

    def join(self, obj, fname):
        # `obj` is the dirstate instance that owns the cached property
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator for dirstate methods that must run inside a parent change.

    The wrapped method raises ``error.ProgrammingError`` when invoked
    outside of a ``dirstate.parentchange()`` context.

    ``functools.wraps`` preserves the wrapped function's ``__name__`` and
    ``__doc__`` so introspection and error messages stay meaningful.
    """

    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator for dirstate methods that must NOT run during a parent change.

    The wrapped method raises ``error.ProgrammingError`` when invoked
    inside of a ``dirstate.parentchange()`` context.

    ``functools.wraps`` preserves the wrapped function's ``__name__`` and
    ``__doc__`` so introspection and error messages stay meaningful.
    """

    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.

        ``validate`` is a callable applied to parent nodeids before they
        are returned to callers; ``sparsematchfn`` builds the sparse
        checkout matcher; ``nodeconstants`` provides ``nullid``;
        ``use_dirstate_v2`` selects the on-disk format.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when in-memory state has diverged from what is on disk
        self._dirty = False
        # mtime of the most recently recorded "clean" file (see set_clean)
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # number of currently open parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        # NOTE(review): presumably callbacks fired on parent-list changes;
        # the consuming code is not in this view — confirm before relying.
        self._plchangecallbacks = {}
        # parents as they were before the current round of changes (set
        # lazily by setparents, cleared by invalidate)
        self._origpl = None
        # files whose entries changed since the last write
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139
139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # merely touching the property forces the dirstate map (and thus
        # the parent pair) to be read from disk now
        self._pl
146
146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.

        Nesting is allowed: ``_parentwriters`` is a counter, not a flag.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # The first access instantiates the map and stores it as an instance
        # attribute, which shadows this propertycache on later accesses.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181
181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
    @property
    def _pl(self):
        # the (p1, p2) pair of parent nodeids, as stored in the dirstate map
        return self._map.parents()
207
207
    def hasdir(self, d):
        """Return whether `d` is the directory of some tracked file
        (delegates to the dirstate map)."""
        return self._map.hastrackeddir(d)
210
210
    @rootcache(b'.hgignore')
    def _ignore(self):
        """Matcher combining all ignore files (cached against .hgignore)."""
        files = self._ignorefiles()
        if not files:
            # no ignore files configured: nothing is ignored
            return matchmod.never()

        # each ignore file becomes an include: pattern of the matcher
        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' on platforms whose
        # native separator differs (ui.slash option, e.g. on Windows)
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
    @propertycache
    def _checklink(self):
        # whether the filesystem at the repository root supports symlinks
        return util.checklink(self._root)
227
227
    @propertycache
    def _checkexec(self):
        # whether the filesystem at the repository root honors the exec bit
        return bool(util.checkexec(self._root))
231
231
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed via the .hg directory)
        return not util.fscasesensitive(self._join(b'.hg'))
235
235
    def _join(self, f):
        """Return the absolute path for repo-relative path `f`."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240
240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a filename to its flags: b'l' (symlink),
        b'x' (executable) or b'' (neither).

        `buildfallback` constructs a flag-lookup callable used for whichever
        flag the filesystem cannot express (no symlink and/or no exec-bit
        support); it is only invoked when actually needed.
        """
        if self._checklink and self._checkexec:
            # the filesystem can express both flags: a single lstat suffices

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    # missing file or stat failure: report no flags
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks are real, but the exec bit must come from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # the exec bit is real, but symlinks must come from the fallback

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither flag is supported: rely entirely on the fallback
            return fallback
280
280
    @propertycache
    def _cwd(self):
        """The current working directory, possibly overridden by config."""
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
    def __contains__(self, key):
        """True if `key` (a filename) has an entry in the dirstate."""
        return key in self._map
338
338
    def __iter__(self):
        # iterate filenames in sorted order for deterministic traversal
        return iter(sorted(self._map))
341
341
    def items(self):
        """Iterate over (filename, entry) pairs of the dirstate map."""
        return pycompat.iteritems(self._map)

    # Python 2 spelling kept as an alias for compatibility
    iteritems = items
346
346
    def directories(self):
        # delegate directly to the dirstate map
        return self._map.directories()
349
349
    def parents(self):
        """Return both parent nodeids, passed through the validator."""
        return [self._validate(p) for p in self._pl]
352
352
    def p1(self):
        """Return the validated first parent nodeid."""
        return self._validate(self._pl[0])
355
355
    def p2(self):
        """Return the validated second parent nodeid."""
        return self._validate(self._pl[1])
358
358
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # a non-null second parent is what defines "merge in progress"
        return self._pl[1] != self._nodeconstants.nullid
363
363
    def branch(self):
        # convert the stored branch name to the local encoding for display
        return encoding.tolocal(self._branch)
366
366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal, and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        # remember the pre-change parents once, for later comparison
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        nullid = self._nodeconstants.nullid
        # leaving a merge (two parents -> one): merge-related entry state
        # must be rewritten in terms of the single remaining parent
        if oldp2 != nullid and p2 == nullid:
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    # the file only exists in the working copy now
                    self._check_new_tracked_filename(f)
                    self._updatedfiles.add(f)
                    self._map.reset_state(
                        f,
                        p1_tracked=False,
                        wc_tracked=True,
                    )
        return copies
418
418
    def setbranch(self, branch):
        """Persist `branch` (given in local encoding) as the current branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        # atomictemp+checkambig: replace .hg/branch atomically and avoid
        # mtime ambiguity with the filecache entry refreshed below
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise
434
434
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so they are recomputed on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        # reset all in-memory bookkeeping to a pristine state
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
450
450
451 def copy(self, source, dest):
451 def copy(self, source, dest):
452 """Mark dest as a copy of source. Unmark dest if source is None."""
452 """Mark dest as a copy of source. Unmark dest if source is None."""
453 if source == dest:
453 if source == dest:
454 return
454 return
455 self._dirty = True
455 self._dirty = True
456 if source is not None:
456 if source is not None:
457 self._map.copymap[dest] = source
457 self._map.copymap[dest] = source
458 self._updatedfiles.add(source)
458 self._updatedfiles.add(source)
459 self._updatedfiles.add(dest)
459 self._updatedfiles.add(dest)
460 elif self._map.copymap.pop(dest, None):
460 elif self._map.copymap.pop(dest, None):
461 self._updatedfiles.add(dest)
461 self._updatedfiles.add(dest)
462
462
    def copied(self, file):
        """Return the copy source of `file`, or None if it is not a copy."""
        return self._map.copymap.get(file, None)
465
465
    def copies(self):
        """Return the mapping of copy destination -> copy source."""
        return self._map.copymap
468
468
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True if the file was previously untracked, False otherwise.
        """
        self._dirty = True
        self._updatedfiles.add(filename)
        entry = self._map.get(filename)
        if entry is None:
            # brand new file: validate the name, then record it as "added"
            self._check_new_tracked_filename(filename)
            self._map.addfile(filename, added=True)
            return True
        elif not entry.tracked:
            # known but untracked (e.g. removed): restore tracking
            self._normallookup(filename)
            return True
        # XXX This is probably overkill for more case, but we need this to
        # fully replace the `normallookup` call with `set_tracked` one.
        # Consider smoothing this in the future.
        self.set_possibly_dirty(filename)
        return False
493
493
    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True if the file was previously tracked, False otherwise.
        """
        ret = self._map.set_untracked(filename)
        if ret:
            # something actually changed: remember to write it out
            self._dirty = True
            self._updatedfiles.add(filename)
        return ret
508
508
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean

        `parentfiledata` is an optional pre-computed (mode, size, mtime)
        tuple; when absent the file is stat()-ed here.
        """
        self._dirty = True
        self._updatedfiles.add(filename)
        if parentfiledata:
            # the caller already stat()-ed the file for us
            (mode, size, mtime) = parentfiledata
        else:
            (mode, size, mtime) = self._get_filedata(filename)
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        self._map.set_clean(filename, mode, size, mtime)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
526
526
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._updatedfiles.add(filename)
        self._map.set_possibly_dirty(filename)
533
533
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.dropfile(filename):
                self._dirty = True
                self._updatedfiles.add(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            pass
        else:
            # the four boolean combinations above are exhaustive
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
597
597
598 @requires_parents_change
598 @requires_parents_change
599 def update_file(
599 def update_file(
600 self,
600 self,
601 filename,
601 filename,
602 wc_tracked,
602 wc_tracked,
603 p1_tracked,
603 p1_tracked,
604 p2_tracked=False,
604 p2_tracked=False,
605 merged=False,
605 merged=False,
606 clean_p1=False,
606 clean_p1=False,
607 clean_p2=False,
607 clean_p2=False,
608 possibly_dirty=False,
608 possibly_dirty=False,
609 parentfiledata=None,
609 parentfiledata=None,
610 ):
610 ):
611 """update the information about a file in the dirstate
611 """update the information about a file in the dirstate
612
612
613 This is to be called when the direstates parent changes to keep track
613 This is to be called when the direstates parent changes to keep track
614 of what is the file situation in regards to the working copy and its parent.
614 of what is the file situation in regards to the working copy and its parent.
615
615
616 This function must be called within a `dirstate.parentchange` context.
616 This function must be called within a `dirstate.parentchange` context.
617
617
618 note: the API is at an early stage and we might need to adjust it
618 note: the API is at an early stage and we might need to adjust it
619 depending of what information ends up being relevant and useful to
619 depending of what information ends up being relevant and useful to
620 other processing.
620 other processing.
621 """
621 """
622 if merged and (clean_p1 or clean_p2):
622 if merged and (clean_p1 or clean_p2):
623 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
623 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
624 raise error.ProgrammingError(msg)
624 raise error.ProgrammingError(msg)
625
625
626 # note: I do not think we need to double check name clash here since we
626 # note: I do not think we need to double check name clash here since we
627 # are in a update/merge case that should already have taken care of
627 # are in a update/merge case that should already have taken care of
628 # this. The test agrees
628 # this. The test agrees
629
629
630 self._dirty = True
630 self._dirty = True
631 self._updatedfiles.add(filename)
631 self._updatedfiles.add(filename)
632
632
633 need_parent_file_data = (
633 need_parent_file_data = (
634 not (possibly_dirty or clean_p2 or merged)
634 not (possibly_dirty or clean_p2 or merged)
635 and wc_tracked
635 and wc_tracked
636 and p1_tracked
636 and p1_tracked
637 )
637 )
638
638
639 # this mean we are doing call for file we do not really care about the
639 # this mean we are doing call for file we do not really care about the
640 # data (eg: added or removed), however this should be a minor overhead
640 # data (eg: added or removed), however this should be a minor overhead
641 # compared to the overall update process calling this.
641 # compared to the overall update process calling this.
642 if need_parent_file_data:
642 if need_parent_file_data:
643 if parentfiledata is None:
643 if parentfiledata is None:
644 parentfiledata = self._get_filedata(filename)
644 parentfiledata = self._get_filedata(filename)
645 mtime = parentfiledata[2]
645 mtime = parentfiledata[2]
646
646
647 if mtime > self._lastnormaltime:
647 if mtime > self._lastnormaltime:
648 # Remember the most recent modification timeslot for
648 # Remember the most recent modification timeslot for
649 # status(), to make sure we won't miss future
649 # status(), to make sure we won't miss future
650 # size-preserving file content modifications that happen
650 # size-preserving file content modifications that happen
651 # within the same timeslot.
651 # within the same timeslot.
652 self._lastnormaltime = mtime
652 self._lastnormaltime = mtime
653
653
654 self._map.reset_state(
654 self._map.reset_state(
655 filename,
655 filename,
656 wc_tracked,
656 wc_tracked,
657 p1_tracked,
657 p1_tracked,
658 p2_tracked=p2_tracked,
658 p2_tracked=p2_tracked,
659 merged=merged,
659 merged=merged,
660 clean_p1=clean_p1,
660 clean_p1=clean_p1,
661 clean_p2=clean_p2,
661 clean_p2=clean_p2,
662 possibly_dirty=possibly_dirty,
662 possibly_dirty=possibly_dirty,
663 parentfiledata=parentfiledata,
663 parentfiledata=parentfiledata,
664 )
664 )
665 if (
665 if (
666 parentfiledata is not None
666 parentfiledata is not None
667 and parentfiledata[2] > self._lastnormaltime
667 and parentfiledata[2] > self._lastnormaltime
668 ):
668 ):
669 # Remember the most recent modification timeslot for status(),
669 # Remember the most recent modification timeslot for status(),
670 # to make sure we won't miss future size-preserving file content
670 # to make sure we won't miss future size-preserving file content
671 # modifications that happen within the same timeslot.
671 # modifications that happen within the same timeslot.
672 self._lastnormaltime = parentfiledata[2]
672 self._lastnormaltime = parentfiledata[2]
673
673
674 def _addpath(
674 def _addpath(
675 self,
675 self,
676 f,
676 f,
677 mode=0,
677 mode=0,
678 size=None,
678 size=None,
679 mtime=None,
679 mtime=None,
680 added=False,
680 added=False,
681 merged=False,
681 merged=False,
682 from_p2=False,
682 from_p2=False,
683 possibly_dirty=False,
683 possibly_dirty=False,
684 ):
684 ):
685 entry = self._map.get(f)
685 entry = self._map.get(f)
686 if added or entry is not None and not entry.tracked:
686 if added or entry is not None and not entry.tracked:
687 self._check_new_tracked_filename(f)
687 self._check_new_tracked_filename(f)
688 self._dirty = True
688 self._dirty = True
689 self._updatedfiles.add(f)
689 self._updatedfiles.add(f)
690 self._map.addfile(
690 self._map.addfile(
691 f,
691 f,
692 mode=mode,
692 mode=mode,
693 size=size,
693 size=size,
694 mtime=mtime,
694 mtime=mtime,
695 added=added,
695 added=added,
696 merged=merged,
696 merged=merged,
697 from_p2=from_p2,
697 from_p2=from_p2,
698 possibly_dirty=possibly_dirty,
698 possibly_dirty=possibly_dirty,
699 )
699 )
700
700
701 def _check_new_tracked_filename(self, filename):
701 def _check_new_tracked_filename(self, filename):
702 scmutil.checkfilename(filename)
702 scmutil.checkfilename(filename)
703 if self._map.hastrackeddir(filename):
703 if self._map.hastrackeddir(filename):
704 msg = _(b'directory %r already in dirstate')
704 msg = _(b'directory %r already in dirstate')
705 msg %= pycompat.bytestr(filename)
705 msg %= pycompat.bytestr(filename)
706 raise error.Abort(msg)
706 raise error.Abort(msg)
707 # shadows
707 # shadows
708 for d in pathutil.finddirs(filename):
708 for d in pathutil.finddirs(filename):
709 if self._map.hastrackeddir(d):
709 if self._map.hastrackeddir(d):
710 break
710 break
711 entry = self._map.get(d)
711 entry = self._map.get(d)
712 if entry is not None and not entry.removed:
712 if entry is not None and not entry.removed:
713 msg = _(b'file %r in dirstate clashes with %r')
713 msg = _(b'file %r in dirstate clashes with %r')
714 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
714 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
715 raise error.Abort(msg)
715 raise error.Abort(msg)
716
716
717 def _get_filedata(self, filename):
717 def _get_filedata(self, filename):
718 """returns"""
718 """returns"""
719 s = os.lstat(self._join(filename))
719 s = os.lstat(self._join(filename))
720 mode = s.st_mode
720 mode = s.st_mode
721 size = s.st_size
721 size = s.st_size
722 mtime = s[stat.ST_MTIME]
722 mtime = s[stat.ST_MTIME]
723 return (mode, size, mtime)
723 return (mode, size, mtime)
724
724
725 def _normallookup(self, f):
725 def _normallookup(self, f):
726 '''Mark a file normal, but possibly dirty.'''
726 '''Mark a file normal, but possibly dirty.'''
727 if self.in_merge:
727 if self.in_merge:
728 # if there is a merge going on and the file was either
728 # if there is a merge going on and the file was either
729 # "merged" or coming from other parent (-2) before
729 # "merged" or coming from other parent (-2) before
730 # being removed, restore that state.
730 # being removed, restore that state.
731 entry = self._map.get(f)
731 entry = self._map.get(f)
732 if entry is not None:
732 if entry is not None:
733 # XXX this should probably be dealt with a a lower level
733 # XXX this should probably be dealt with a a lower level
734 # (see `merged_removed` and `from_p2_removed`)
734 # (see `merged_removed` and `from_p2_removed`)
735 if entry.merged_removed or entry.from_p2_removed:
735 if entry.merged_removed or entry.from_p2_removed:
736 source = self._map.copymap.get(f)
736 source = self._map.copymap.get(f)
737 if entry.merged_removed:
738 self._addpath(f, merged=True)
739 else:
737 self._addpath(f, from_p2=True)
740 self._addpath(f, from_p2=True)
738 self._map.copymap.pop(f, None)
741 self._map.copymap.pop(f, None)
739 if source is not None:
742 if source is not None:
740 self.copy(source, f)
743 self.copy(source, f)
741 return
744 return
742 elif entry.merged or entry.from_p2:
745 elif entry.merged or entry.from_p2:
743 return
746 return
744 self._addpath(f, possibly_dirty=True)
747 self._addpath(f, possibly_dirty=True)
745 self._map.copymap.pop(f, None)
748 self._map.copymap.pop(f, None)
746
749
747 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
750 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
748 if exists is None:
751 if exists is None:
749 exists = os.path.lexists(os.path.join(self._root, path))
752 exists = os.path.lexists(os.path.join(self._root, path))
750 if not exists:
753 if not exists:
751 # Maybe a path component exists
754 # Maybe a path component exists
752 if not ignoremissing and b'/' in path:
755 if not ignoremissing and b'/' in path:
753 d, f = path.rsplit(b'/', 1)
756 d, f = path.rsplit(b'/', 1)
754 d = self._normalize(d, False, ignoremissing, None)
757 d = self._normalize(d, False, ignoremissing, None)
755 folded = d + b"/" + f
758 folded = d + b"/" + f
756 else:
759 else:
757 # No path components, preserve original case
760 # No path components, preserve original case
758 folded = path
761 folded = path
759 else:
762 else:
760 # recursively normalize leading directory components
763 # recursively normalize leading directory components
761 # against dirstate
764 # against dirstate
762 if b'/' in normed:
765 if b'/' in normed:
763 d, f = normed.rsplit(b'/', 1)
766 d, f = normed.rsplit(b'/', 1)
764 d = self._normalize(d, False, ignoremissing, True)
767 d = self._normalize(d, False, ignoremissing, True)
765 r = self._root + b"/" + d
768 r = self._root + b"/" + d
766 folded = d + b"/" + util.fspath(f, r)
769 folded = d + b"/" + util.fspath(f, r)
767 else:
770 else:
768 folded = util.fspath(normed, self._root)
771 folded = util.fspath(normed, self._root)
769 storemap[normed] = folded
772 storemap[normed] = folded
770
773
771 return folded
774 return folded
772
775
773 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
776 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
774 normed = util.normcase(path)
777 normed = util.normcase(path)
775 folded = self._map.filefoldmap.get(normed, None)
778 folded = self._map.filefoldmap.get(normed, None)
776 if folded is None:
779 if folded is None:
777 if isknown:
780 if isknown:
778 folded = path
781 folded = path
779 else:
782 else:
780 folded = self._discoverpath(
783 folded = self._discoverpath(
781 path, normed, ignoremissing, exists, self._map.filefoldmap
784 path, normed, ignoremissing, exists, self._map.filefoldmap
782 )
785 )
783 return folded
786 return folded
784
787
785 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
788 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
786 normed = util.normcase(path)
789 normed = util.normcase(path)
787 folded = self._map.filefoldmap.get(normed, None)
790 folded = self._map.filefoldmap.get(normed, None)
788 if folded is None:
791 if folded is None:
789 folded = self._map.dirfoldmap.get(normed, None)
792 folded = self._map.dirfoldmap.get(normed, None)
790 if folded is None:
793 if folded is None:
791 if isknown:
794 if isknown:
792 folded = path
795 folded = path
793 else:
796 else:
794 # store discovered result in dirfoldmap so that future
797 # store discovered result in dirfoldmap so that future
795 # normalizefile calls don't start matching directories
798 # normalizefile calls don't start matching directories
796 folded = self._discoverpath(
799 folded = self._discoverpath(
797 path, normed, ignoremissing, exists, self._map.dirfoldmap
800 path, normed, ignoremissing, exists, self._map.dirfoldmap
798 )
801 )
799 return folded
802 return folded
800
803
801 def normalize(self, path, isknown=False, ignoremissing=False):
804 def normalize(self, path, isknown=False, ignoremissing=False):
802 """
805 """
803 normalize the case of a pathname when on a casefolding filesystem
806 normalize the case of a pathname when on a casefolding filesystem
804
807
805 isknown specifies whether the filename came from walking the
808 isknown specifies whether the filename came from walking the
806 disk, to avoid extra filesystem access.
809 disk, to avoid extra filesystem access.
807
810
808 If ignoremissing is True, missing path are returned
811 If ignoremissing is True, missing path are returned
809 unchanged. Otherwise, we try harder to normalize possibly
812 unchanged. Otherwise, we try harder to normalize possibly
810 existing path components.
813 existing path components.
811
814
812 The normalized case is determined based on the following precedence:
815 The normalized case is determined based on the following precedence:
813
816
814 - version of name already stored in the dirstate
817 - version of name already stored in the dirstate
815 - version of name stored on disk
818 - version of name stored on disk
816 - version provided via command arguments
819 - version provided via command arguments
817 """
820 """
818
821
819 if self._checkcase:
822 if self._checkcase:
820 return self._normalize(path, isknown, ignoremissing)
823 return self._normalize(path, isknown, ignoremissing)
821 return path
824 return path
822
825
823 def clear(self):
826 def clear(self):
824 self._map.clear()
827 self._map.clear()
825 self._lastnormaltime = 0
828 self._lastnormaltime = 0
826 self._updatedfiles.clear()
829 self._updatedfiles.clear()
827 self._dirty = True
830 self._dirty = True
828
831
829 def rebuild(self, parent, allfiles, changedfiles=None):
832 def rebuild(self, parent, allfiles, changedfiles=None):
830 if changedfiles is None:
833 if changedfiles is None:
831 # Rebuild entire dirstate
834 # Rebuild entire dirstate
832 to_lookup = allfiles
835 to_lookup = allfiles
833 to_drop = []
836 to_drop = []
834 lastnormaltime = self._lastnormaltime
837 lastnormaltime = self._lastnormaltime
835 self.clear()
838 self.clear()
836 self._lastnormaltime = lastnormaltime
839 self._lastnormaltime = lastnormaltime
837 elif len(changedfiles) < 10:
840 elif len(changedfiles) < 10:
838 # Avoid turning allfiles into a set, which can be expensive if it's
841 # Avoid turning allfiles into a set, which can be expensive if it's
839 # large.
842 # large.
840 to_lookup = []
843 to_lookup = []
841 to_drop = []
844 to_drop = []
842 for f in changedfiles:
845 for f in changedfiles:
843 if f in allfiles:
846 if f in allfiles:
844 to_lookup.append(f)
847 to_lookup.append(f)
845 else:
848 else:
846 to_drop.append(f)
849 to_drop.append(f)
847 else:
850 else:
848 changedfilesset = set(changedfiles)
851 changedfilesset = set(changedfiles)
849 to_lookup = changedfilesset & set(allfiles)
852 to_lookup = changedfilesset & set(allfiles)
850 to_drop = changedfilesset - to_lookup
853 to_drop = changedfilesset - to_lookup
851
854
852 if self._origpl is None:
855 if self._origpl is None:
853 self._origpl = self._pl
856 self._origpl = self._pl
854 self._map.setparents(parent, self._nodeconstants.nullid)
857 self._map.setparents(parent, self._nodeconstants.nullid)
855
858
856 for f in to_lookup:
859 for f in to_lookup:
857 self._normallookup(f)
860 self._normallookup(f)
858 for f in to_drop:
861 for f in to_drop:
859 if self._map.dropfile(f):
862 if self._map.dropfile(f):
860 self._updatedfiles.add(f)
863 self._updatedfiles.add(f)
861
864
862 self._dirty = True
865 self._dirty = True
863
866
864 def identity(self):
867 def identity(self):
865 """Return identity of dirstate itself to detect changing in storage
868 """Return identity of dirstate itself to detect changing in storage
866
869
867 If identity of previous dirstate is equal to this, writing
870 If identity of previous dirstate is equal to this, writing
868 changes based on the former dirstate out can keep consistency.
871 changes based on the former dirstate out can keep consistency.
869 """
872 """
870 return self._map.identity
873 return self._map.identity
871
874
872 def write(self, tr):
875 def write(self, tr):
873 if not self._dirty:
876 if not self._dirty:
874 return
877 return
875
878
876 filename = self._filename
879 filename = self._filename
877 if tr:
880 if tr:
878 # 'dirstate.write()' is not only for writing in-memory
881 # 'dirstate.write()' is not only for writing in-memory
879 # changes out, but also for dropping ambiguous timestamp.
882 # changes out, but also for dropping ambiguous timestamp.
880 # delayed writing re-raise "ambiguous timestamp issue".
883 # delayed writing re-raise "ambiguous timestamp issue".
881 # See also the wiki page below for detail:
884 # See also the wiki page below for detail:
882 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
885 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
883
886
884 # emulate dropping timestamp in 'parsers.pack_dirstate'
887 # emulate dropping timestamp in 'parsers.pack_dirstate'
885 now = _getfsnow(self._opener)
888 now = _getfsnow(self._opener)
886 self._map.clearambiguoustimes(self._updatedfiles, now)
889 self._map.clearambiguoustimes(self._updatedfiles, now)
887
890
888 # emulate that all 'dirstate.normal' results are written out
891 # emulate that all 'dirstate.normal' results are written out
889 self._lastnormaltime = 0
892 self._lastnormaltime = 0
890 self._updatedfiles.clear()
893 self._updatedfiles.clear()
891
894
892 # delay writing in-memory changes out
895 # delay writing in-memory changes out
893 tr.addfilegenerator(
896 tr.addfilegenerator(
894 b'dirstate',
897 b'dirstate',
895 (self._filename,),
898 (self._filename,),
896 lambda f: self._writedirstate(tr, f),
899 lambda f: self._writedirstate(tr, f),
897 location=b'plain',
900 location=b'plain',
898 )
901 )
899 return
902 return
900
903
901 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
904 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
902 self._writedirstate(tr, st)
905 self._writedirstate(tr, st)
903
906
904 def addparentchangecallback(self, category, callback):
907 def addparentchangecallback(self, category, callback):
905 """add a callback to be called when the wd parents are changed
908 """add a callback to be called when the wd parents are changed
906
909
907 Callback will be called with the following arguments:
910 Callback will be called with the following arguments:
908 dirstate, (oldp1, oldp2), (newp1, newp2)
911 dirstate, (oldp1, oldp2), (newp1, newp2)
909
912
910 Category is a unique identifier to allow overwriting an old callback
913 Category is a unique identifier to allow overwriting an old callback
911 with a newer callback.
914 with a newer callback.
912 """
915 """
913 self._plchangecallbacks[category] = callback
916 self._plchangecallbacks[category] = callback
914
917
915 def _writedirstate(self, tr, st):
918 def _writedirstate(self, tr, st):
916 # notify callbacks about parents change
919 # notify callbacks about parents change
917 if self._origpl is not None and self._origpl != self._pl:
920 if self._origpl is not None and self._origpl != self._pl:
918 for c, callback in sorted(
921 for c, callback in sorted(
919 pycompat.iteritems(self._plchangecallbacks)
922 pycompat.iteritems(self._plchangecallbacks)
920 ):
923 ):
921 callback(self, self._origpl, self._pl)
924 callback(self, self._origpl, self._pl)
922 self._origpl = None
925 self._origpl = None
923 # use the modification time of the newly created temporary file as the
926 # use the modification time of the newly created temporary file as the
924 # filesystem's notion of 'now'
927 # filesystem's notion of 'now'
925 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
928 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
926
929
927 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
930 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
928 # timestamp of each entries in dirstate, because of 'now > mtime'
931 # timestamp of each entries in dirstate, because of 'now > mtime'
929 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
932 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
930 if delaywrite > 0:
933 if delaywrite > 0:
931 # do we have any files to delay for?
934 # do we have any files to delay for?
932 for f, e in pycompat.iteritems(self._map):
935 for f, e in pycompat.iteritems(self._map):
933 if e.need_delay(now):
936 if e.need_delay(now):
934 import time # to avoid useless import
937 import time # to avoid useless import
935
938
936 # rather than sleep n seconds, sleep until the next
939 # rather than sleep n seconds, sleep until the next
937 # multiple of n seconds
940 # multiple of n seconds
938 clock = time.time()
941 clock = time.time()
939 start = int(clock) - (int(clock) % delaywrite)
942 start = int(clock) - (int(clock) % delaywrite)
940 end = start + delaywrite
943 end = start + delaywrite
941 time.sleep(end - clock)
944 time.sleep(end - clock)
942 now = end # trust our estimate that the end is near now
945 now = end # trust our estimate that the end is near now
943 break
946 break
944
947
945 self._map.write(tr, st, now)
948 self._map.write(tr, st, now)
946 self._lastnormaltime = 0
949 self._lastnormaltime = 0
947 self._dirty = False
950 self._dirty = False
948
951
949 def _dirignore(self, f):
952 def _dirignore(self, f):
950 if self._ignore(f):
953 if self._ignore(f):
951 return True
954 return True
952 for p in pathutil.finddirs(f):
955 for p in pathutil.finddirs(f):
953 if self._ignore(p):
956 if self._ignore(p):
954 return True
957 return True
955 return False
958 return False
956
959
957 def _ignorefiles(self):
960 def _ignorefiles(self):
958 files = []
961 files = []
959 if os.path.exists(self._join(b'.hgignore')):
962 if os.path.exists(self._join(b'.hgignore')):
960 files.append(self._join(b'.hgignore'))
963 files.append(self._join(b'.hgignore'))
961 for name, path in self._ui.configitems(b"ui"):
964 for name, path in self._ui.configitems(b"ui"):
962 if name == b'ignore' or name.startswith(b'ignore.'):
965 if name == b'ignore' or name.startswith(b'ignore.'):
963 # we need to use os.path.join here rather than self._join
966 # we need to use os.path.join here rather than self._join
964 # because path is arbitrary and user-specified
967 # because path is arbitrary and user-specified
965 files.append(os.path.join(self._rootdir, util.expandpath(path)))
968 files.append(os.path.join(self._rootdir, util.expandpath(path)))
966 return files
969 return files
967
970
968 def _ignorefileandline(self, f):
971 def _ignorefileandline(self, f):
969 files = collections.deque(self._ignorefiles())
972 files = collections.deque(self._ignorefiles())
970 visited = set()
973 visited = set()
971 while files:
974 while files:
972 i = files.popleft()
975 i = files.popleft()
973 patterns = matchmod.readpatternfile(
976 patterns = matchmod.readpatternfile(
974 i, self._ui.warn, sourceinfo=True
977 i, self._ui.warn, sourceinfo=True
975 )
978 )
976 for pattern, lineno, line in patterns:
979 for pattern, lineno, line in patterns:
977 kind, p = matchmod._patsplit(pattern, b'glob')
980 kind, p = matchmod._patsplit(pattern, b'glob')
978 if kind == b"subinclude":
981 if kind == b"subinclude":
979 if p not in visited:
982 if p not in visited:
980 files.append(p)
983 files.append(p)
981 continue
984 continue
982 m = matchmod.match(
985 m = matchmod.match(
983 self._root, b'', [], [pattern], warn=self._ui.warn
986 self._root, b'', [], [pattern], warn=self._ui.warn
984 )
987 )
985 if m(f):
988 if m(f):
986 return (i, lineno, line)
989 return (i, lineno, line)
987 visited.add(i)
990 visited.add(i)
988 return (None, -1, b"")
991 return (None, -1, b"")
989
992
990 def _walkexplicit(self, match, subrepos):
993 def _walkexplicit(self, match, subrepos):
991 """Get stat data about the files explicitly specified by match.
994 """Get stat data about the files explicitly specified by match.
992
995
993 Return a triple (results, dirsfound, dirsnotfound).
996 Return a triple (results, dirsfound, dirsnotfound).
994 - results is a mapping from filename to stat result. It also contains
997 - results is a mapping from filename to stat result. It also contains
995 listings mapping subrepos and .hg to None.
998 listings mapping subrepos and .hg to None.
996 - dirsfound is a list of files found to be directories.
999 - dirsfound is a list of files found to be directories.
997 - dirsnotfound is a list of files that the dirstate thinks are
1000 - dirsnotfound is a list of files that the dirstate thinks are
998 directories and that were not found."""
1001 directories and that were not found."""
999
1002
1000 def badtype(mode):
1003 def badtype(mode):
1001 kind = _(b'unknown')
1004 kind = _(b'unknown')
1002 if stat.S_ISCHR(mode):
1005 if stat.S_ISCHR(mode):
1003 kind = _(b'character device')
1006 kind = _(b'character device')
1004 elif stat.S_ISBLK(mode):
1007 elif stat.S_ISBLK(mode):
1005 kind = _(b'block device')
1008 kind = _(b'block device')
1006 elif stat.S_ISFIFO(mode):
1009 elif stat.S_ISFIFO(mode):
1007 kind = _(b'fifo')
1010 kind = _(b'fifo')
1008 elif stat.S_ISSOCK(mode):
1011 elif stat.S_ISSOCK(mode):
1009 kind = _(b'socket')
1012 kind = _(b'socket')
1010 elif stat.S_ISDIR(mode):
1013 elif stat.S_ISDIR(mode):
1011 kind = _(b'directory')
1014 kind = _(b'directory')
1012 return _(b'unsupported file type (type is %s)') % kind
1015 return _(b'unsupported file type (type is %s)') % kind
1013
1016
1014 badfn = match.bad
1017 badfn = match.bad
1015 dmap = self._map
1018 dmap = self._map
1016 lstat = os.lstat
1019 lstat = os.lstat
1017 getkind = stat.S_IFMT
1020 getkind = stat.S_IFMT
1018 dirkind = stat.S_IFDIR
1021 dirkind = stat.S_IFDIR
1019 regkind = stat.S_IFREG
1022 regkind = stat.S_IFREG
1020 lnkkind = stat.S_IFLNK
1023 lnkkind = stat.S_IFLNK
1021 join = self._join
1024 join = self._join
1022 dirsfound = []
1025 dirsfound = []
1023 foundadd = dirsfound.append
1026 foundadd = dirsfound.append
1024 dirsnotfound = []
1027 dirsnotfound = []
1025 notfoundadd = dirsnotfound.append
1028 notfoundadd = dirsnotfound.append
1026
1029
1027 if not match.isexact() and self._checkcase:
1030 if not match.isexact() and self._checkcase:
1028 normalize = self._normalize
1031 normalize = self._normalize
1029 else:
1032 else:
1030 normalize = None
1033 normalize = None
1031
1034
1032 files = sorted(match.files())
1035 files = sorted(match.files())
1033 subrepos.sort()
1036 subrepos.sort()
1034 i, j = 0, 0
1037 i, j = 0, 0
1035 while i < len(files) and j < len(subrepos):
1038 while i < len(files) and j < len(subrepos):
1036 subpath = subrepos[j] + b"/"
1039 subpath = subrepos[j] + b"/"
1037 if files[i] < subpath:
1040 if files[i] < subpath:
1038 i += 1
1041 i += 1
1039 continue
1042 continue
1040 while i < len(files) and files[i].startswith(subpath):
1043 while i < len(files) and files[i].startswith(subpath):
1041 del files[i]
1044 del files[i]
1042 j += 1
1045 j += 1
1043
1046
1044 if not files or b'' in files:
1047 if not files or b'' in files:
1045 files = [b'']
1048 files = [b'']
1046 # constructing the foldmap is expensive, so don't do it for the
1049 # constructing the foldmap is expensive, so don't do it for the
1047 # common case where files is ['']
1050 # common case where files is ['']
1048 normalize = None
1051 normalize = None
1049 results = dict.fromkeys(subrepos)
1052 results = dict.fromkeys(subrepos)
1050 results[b'.hg'] = None
1053 results[b'.hg'] = None
1051
1054
1052 for ff in files:
1055 for ff in files:
1053 if normalize:
1056 if normalize:
1054 nf = normalize(ff, False, True)
1057 nf = normalize(ff, False, True)
1055 else:
1058 else:
1056 nf = ff
1059 nf = ff
1057 if nf in results:
1060 if nf in results:
1058 continue
1061 continue
1059
1062
1060 try:
1063 try:
1061 st = lstat(join(nf))
1064 st = lstat(join(nf))
1062 kind = getkind(st.st_mode)
1065 kind = getkind(st.st_mode)
1063 if kind == dirkind:
1066 if kind == dirkind:
1064 if nf in dmap:
1067 if nf in dmap:
1065 # file replaced by dir on disk but still in dirstate
1068 # file replaced by dir on disk but still in dirstate
1066 results[nf] = None
1069 results[nf] = None
1067 foundadd((nf, ff))
1070 foundadd((nf, ff))
1068 elif kind == regkind or kind == lnkkind:
1071 elif kind == regkind or kind == lnkkind:
1069 results[nf] = st
1072 results[nf] = st
1070 else:
1073 else:
1071 badfn(ff, badtype(kind))
1074 badfn(ff, badtype(kind))
1072 if nf in dmap:
1075 if nf in dmap:
1073 results[nf] = None
1076 results[nf] = None
1074 except OSError as inst: # nf not found on disk - it is dirstate only
1077 except OSError as inst: # nf not found on disk - it is dirstate only
1075 if nf in dmap: # does it exactly match a missing file?
1078 if nf in dmap: # does it exactly match a missing file?
1076 results[nf] = None
1079 results[nf] = None
1077 else: # does it match a missing directory?
1080 else: # does it match a missing directory?
1078 if self._map.hasdir(nf):
1081 if self._map.hasdir(nf):
1079 notfoundadd(nf)
1082 notfoundadd(nf)
1080 else:
1083 else:
1081 badfn(ff, encoding.strtolocal(inst.strerror))
1084 badfn(ff, encoding.strtolocal(inst.strerror))
1082
1085
1083 # match.files() may contain explicitly-specified paths that shouldn't
1086 # match.files() may contain explicitly-specified paths that shouldn't
1084 # be taken; drop them from the list of files found. dirsfound/notfound
1087 # be taken; drop them from the list of files found. dirsfound/notfound
1085 # aren't filtered here because they will be tested later.
1088 # aren't filtered here because they will be tested later.
1086 if match.anypats():
1089 if match.anypats():
1087 for f in list(results):
1090 for f in list(results):
1088 if f == b'.hg' or f in subrepos:
1091 if f == b'.hg' or f in subrepos:
1089 # keep sentinel to disable further out-of-repo walks
1092 # keep sentinel to disable further out-of-repo walks
1090 continue
1093 continue
1091 if not match(f):
1094 if not match(f):
1092 del results[f]
1095 del results[f]
1093
1096
1094 # Case insensitive filesystems cannot rely on lstat() failing to detect
1097 # Case insensitive filesystems cannot rely on lstat() failing to detect
1095 # a case-only rename. Prune the stat object for any file that does not
1098 # a case-only rename. Prune the stat object for any file that does not
1096 # match the case in the filesystem, if there are multiple files that
1099 # match the case in the filesystem, if there are multiple files that
1097 # normalize to the same path.
1100 # normalize to the same path.
1098 if match.isexact() and self._checkcase:
1101 if match.isexact() and self._checkcase:
1099 normed = {}
1102 normed = {}
1100
1103
1101 for f, st in pycompat.iteritems(results):
1104 for f, st in pycompat.iteritems(results):
1102 if st is None:
1105 if st is None:
1103 continue
1106 continue
1104
1107
1105 nc = util.normcase(f)
1108 nc = util.normcase(f)
1106 paths = normed.get(nc)
1109 paths = normed.get(nc)
1107
1110
1108 if paths is None:
1111 if paths is None:
1109 paths = set()
1112 paths = set()
1110 normed[nc] = paths
1113 normed[nc] = paths
1111
1114
1112 paths.add(f)
1115 paths.add(f)
1113
1116
1114 for norm, paths in pycompat.iteritems(normed):
1117 for norm, paths in pycompat.iteritems(normed):
1115 if len(paths) > 1:
1118 if len(paths) > 1:
1116 for path in paths:
1119 for path in paths:
1117 folded = self._discoverpath(
1120 folded = self._discoverpath(
1118 path, norm, True, None, self._map.dirfoldmap
1121 path, norm, True, None, self._map.dirfoldmap
1119 )
1122 )
1120 if path != folded:
1123 if path != folded:
1121 results[path] = None
1124 results[path] = None
1122
1125
1123 return results, dirsfound, dirsnotfound
1126 return results, dirsfound, dirsnotfound
1124
1127
1125 def walk(self, match, subrepos, unknown, ignored, full=True):
1128 def walk(self, match, subrepos, unknown, ignored, full=True):
1126 """
1129 """
1127 Walk recursively through the directory tree, finding all files
1130 Walk recursively through the directory tree, finding all files
1128 matched by match.
1131 matched by match.
1129
1132
1130 If full is False, maybe skip some known-clean files.
1133 If full is False, maybe skip some known-clean files.
1131
1134
1132 Return a dict mapping filename to stat-like object (either
1135 Return a dict mapping filename to stat-like object (either
1133 mercurial.osutil.stat instance or return value of os.stat()).
1136 mercurial.osutil.stat instance or return value of os.stat()).
1134
1137
1135 """
1138 """
1136 # full is a flag that extensions that hook into walk can use -- this
1139 # full is a flag that extensions that hook into walk can use -- this
1137 # implementation doesn't use it at all. This satisfies the contract
1140 # implementation doesn't use it at all. This satisfies the contract
1138 # because we only guarantee a "maybe".
1141 # because we only guarantee a "maybe".
1139
1142
1140 if ignored:
1143 if ignored:
1141 ignore = util.never
1144 ignore = util.never
1142 dirignore = util.never
1145 dirignore = util.never
1143 elif unknown:
1146 elif unknown:
1144 ignore = self._ignore
1147 ignore = self._ignore
1145 dirignore = self._dirignore
1148 dirignore = self._dirignore
1146 else:
1149 else:
1147 # if not unknown and not ignored, drop dir recursion and step 2
1150 # if not unknown and not ignored, drop dir recursion and step 2
1148 ignore = util.always
1151 ignore = util.always
1149 dirignore = util.always
1152 dirignore = util.always
1150
1153
1151 matchfn = match.matchfn
1154 matchfn = match.matchfn
1152 matchalways = match.always()
1155 matchalways = match.always()
1153 matchtdir = match.traversedir
1156 matchtdir = match.traversedir
1154 dmap = self._map
1157 dmap = self._map
1155 listdir = util.listdir
1158 listdir = util.listdir
1156 lstat = os.lstat
1159 lstat = os.lstat
1157 dirkind = stat.S_IFDIR
1160 dirkind = stat.S_IFDIR
1158 regkind = stat.S_IFREG
1161 regkind = stat.S_IFREG
1159 lnkkind = stat.S_IFLNK
1162 lnkkind = stat.S_IFLNK
1160 join = self._join
1163 join = self._join
1161
1164
1162 exact = skipstep3 = False
1165 exact = skipstep3 = False
1163 if match.isexact(): # match.exact
1166 if match.isexact(): # match.exact
1164 exact = True
1167 exact = True
1165 dirignore = util.always # skip step 2
1168 dirignore = util.always # skip step 2
1166 elif match.prefix(): # match.match, no patterns
1169 elif match.prefix(): # match.match, no patterns
1167 skipstep3 = True
1170 skipstep3 = True
1168
1171
1169 if not exact and self._checkcase:
1172 if not exact and self._checkcase:
1170 normalize = self._normalize
1173 normalize = self._normalize
1171 normalizefile = self._normalizefile
1174 normalizefile = self._normalizefile
1172 skipstep3 = False
1175 skipstep3 = False
1173 else:
1176 else:
1174 normalize = self._normalize
1177 normalize = self._normalize
1175 normalizefile = None
1178 normalizefile = None
1176
1179
1177 # step 1: find all explicit files
1180 # step 1: find all explicit files
1178 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1181 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1179 if matchtdir:
1182 if matchtdir:
1180 for d in work:
1183 for d in work:
1181 matchtdir(d[0])
1184 matchtdir(d[0])
1182 for d in dirsnotfound:
1185 for d in dirsnotfound:
1183 matchtdir(d)
1186 matchtdir(d)
1184
1187
1185 skipstep3 = skipstep3 and not (work or dirsnotfound)
1188 skipstep3 = skipstep3 and not (work or dirsnotfound)
1186 work = [d for d in work if not dirignore(d[0])]
1189 work = [d for d in work if not dirignore(d[0])]
1187
1190
1188 # step 2: visit subdirectories
1191 # step 2: visit subdirectories
1189 def traverse(work, alreadynormed):
1192 def traverse(work, alreadynormed):
1190 wadd = work.append
1193 wadd = work.append
1191 while work:
1194 while work:
1192 tracing.counter('dirstate.walk work', len(work))
1195 tracing.counter('dirstate.walk work', len(work))
1193 nd = work.pop()
1196 nd = work.pop()
1194 visitentries = match.visitchildrenset(nd)
1197 visitentries = match.visitchildrenset(nd)
1195 if not visitentries:
1198 if not visitentries:
1196 continue
1199 continue
1197 if visitentries == b'this' or visitentries == b'all':
1200 if visitentries == b'this' or visitentries == b'all':
1198 visitentries = None
1201 visitentries = None
1199 skip = None
1202 skip = None
1200 if nd != b'':
1203 if nd != b'':
1201 skip = b'.hg'
1204 skip = b'.hg'
1202 try:
1205 try:
1203 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1206 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1204 entries = listdir(join(nd), stat=True, skip=skip)
1207 entries = listdir(join(nd), stat=True, skip=skip)
1205 except OSError as inst:
1208 except OSError as inst:
1206 if inst.errno in (errno.EACCES, errno.ENOENT):
1209 if inst.errno in (errno.EACCES, errno.ENOENT):
1207 match.bad(
1210 match.bad(
1208 self.pathto(nd), encoding.strtolocal(inst.strerror)
1211 self.pathto(nd), encoding.strtolocal(inst.strerror)
1209 )
1212 )
1210 continue
1213 continue
1211 raise
1214 raise
1212 for f, kind, st in entries:
1215 for f, kind, st in entries:
1213 # Some matchers may return files in the visitentries set,
1216 # Some matchers may return files in the visitentries set,
1214 # instead of 'this', if the matcher explicitly mentions them
1217 # instead of 'this', if the matcher explicitly mentions them
1215 # and is not an exactmatcher. This is acceptable; we do not
1218 # and is not an exactmatcher. This is acceptable; we do not
1216 # make any hard assumptions about file-or-directory below
1219 # make any hard assumptions about file-or-directory below
1217 # based on the presence of `f` in visitentries. If
1220 # based on the presence of `f` in visitentries. If
1218 # visitchildrenset returned a set, we can always skip the
1221 # visitchildrenset returned a set, we can always skip the
1219 # entries *not* in the set it provided regardless of whether
1222 # entries *not* in the set it provided regardless of whether
1220 # they're actually a file or a directory.
1223 # they're actually a file or a directory.
1221 if visitentries and f not in visitentries:
1224 if visitentries and f not in visitentries:
1222 continue
1225 continue
1223 if normalizefile:
1226 if normalizefile:
1224 # even though f might be a directory, we're only
1227 # even though f might be a directory, we're only
1225 # interested in comparing it to files currently in the
1228 # interested in comparing it to files currently in the
1226 # dmap -- therefore normalizefile is enough
1229 # dmap -- therefore normalizefile is enough
1227 nf = normalizefile(
1230 nf = normalizefile(
1228 nd and (nd + b"/" + f) or f, True, True
1231 nd and (nd + b"/" + f) or f, True, True
1229 )
1232 )
1230 else:
1233 else:
1231 nf = nd and (nd + b"/" + f) or f
1234 nf = nd and (nd + b"/" + f) or f
1232 if nf not in results:
1235 if nf not in results:
1233 if kind == dirkind:
1236 if kind == dirkind:
1234 if not ignore(nf):
1237 if not ignore(nf):
1235 if matchtdir:
1238 if matchtdir:
1236 matchtdir(nf)
1239 matchtdir(nf)
1237 wadd(nf)
1240 wadd(nf)
1238 if nf in dmap and (matchalways or matchfn(nf)):
1241 if nf in dmap and (matchalways or matchfn(nf)):
1239 results[nf] = None
1242 results[nf] = None
1240 elif kind == regkind or kind == lnkkind:
1243 elif kind == regkind or kind == lnkkind:
1241 if nf in dmap:
1244 if nf in dmap:
1242 if matchalways or matchfn(nf):
1245 if matchalways or matchfn(nf):
1243 results[nf] = st
1246 results[nf] = st
1244 elif (matchalways or matchfn(nf)) and not ignore(
1247 elif (matchalways or matchfn(nf)) and not ignore(
1245 nf
1248 nf
1246 ):
1249 ):
1247 # unknown file -- normalize if necessary
1250 # unknown file -- normalize if necessary
1248 if not alreadynormed:
1251 if not alreadynormed:
1249 nf = normalize(nf, False, True)
1252 nf = normalize(nf, False, True)
1250 results[nf] = st
1253 results[nf] = st
1251 elif nf in dmap and (matchalways or matchfn(nf)):
1254 elif nf in dmap and (matchalways or matchfn(nf)):
1252 results[nf] = None
1255 results[nf] = None
1253
1256
1254 for nd, d in work:
1257 for nd, d in work:
1255 # alreadynormed means that processwork doesn't have to do any
1258 # alreadynormed means that processwork doesn't have to do any
1256 # expensive directory normalization
1259 # expensive directory normalization
1257 alreadynormed = not normalize or nd == d
1260 alreadynormed = not normalize or nd == d
1258 traverse([d], alreadynormed)
1261 traverse([d], alreadynormed)
1259
1262
1260 for s in subrepos:
1263 for s in subrepos:
1261 del results[s]
1264 del results[s]
1262 del results[b'.hg']
1265 del results[b'.hg']
1263
1266
1264 # step 3: visit remaining files from dmap
1267 # step 3: visit remaining files from dmap
1265 if not skipstep3 and not exact:
1268 if not skipstep3 and not exact:
1266 # If a dmap file is not in results yet, it was either
1269 # If a dmap file is not in results yet, it was either
1267 # a) not matching matchfn b) ignored, c) missing, or d) under a
1270 # a) not matching matchfn b) ignored, c) missing, or d) under a
1268 # symlink directory.
1271 # symlink directory.
1269 if not results and matchalways:
1272 if not results and matchalways:
1270 visit = [f for f in dmap]
1273 visit = [f for f in dmap]
1271 else:
1274 else:
1272 visit = [f for f in dmap if f not in results and matchfn(f)]
1275 visit = [f for f in dmap if f not in results and matchfn(f)]
1273 visit.sort()
1276 visit.sort()
1274
1277
1275 if unknown:
1278 if unknown:
1276 # unknown == True means we walked all dirs under the roots
1279 # unknown == True means we walked all dirs under the roots
1277 # that wasn't ignored, and everything that matched was stat'ed
1280 # that wasn't ignored, and everything that matched was stat'ed
1278 # and is already in results.
1281 # and is already in results.
1279 # The rest must thus be ignored or under a symlink.
1282 # The rest must thus be ignored or under a symlink.
1280 audit_path = pathutil.pathauditor(self._root, cached=True)
1283 audit_path = pathutil.pathauditor(self._root, cached=True)
1281
1284
1282 for nf in iter(visit):
1285 for nf in iter(visit):
1283 # If a stat for the same file was already added with a
1286 # If a stat for the same file was already added with a
1284 # different case, don't add one for this, since that would
1287 # different case, don't add one for this, since that would
1285 # make it appear as if the file exists under both names
1288 # make it appear as if the file exists under both names
1286 # on disk.
1289 # on disk.
1287 if (
1290 if (
1288 normalizefile
1291 normalizefile
1289 and normalizefile(nf, True, True) in results
1292 and normalizefile(nf, True, True) in results
1290 ):
1293 ):
1291 results[nf] = None
1294 results[nf] = None
1292 # Report ignored items in the dmap as long as they are not
1295 # Report ignored items in the dmap as long as they are not
1293 # under a symlink directory.
1296 # under a symlink directory.
1294 elif audit_path.check(nf):
1297 elif audit_path.check(nf):
1295 try:
1298 try:
1296 results[nf] = lstat(join(nf))
1299 results[nf] = lstat(join(nf))
1297 # file was just ignored, no links, and exists
1300 # file was just ignored, no links, and exists
1298 except OSError:
1301 except OSError:
1299 # file doesn't exist
1302 # file doesn't exist
1300 results[nf] = None
1303 results[nf] = None
1301 else:
1304 else:
1302 # It's either missing or under a symlink directory
1305 # It's either missing or under a symlink directory
1303 # which we in this case report as missing
1306 # which we in this case report as missing
1304 results[nf] = None
1307 results[nf] = None
1305 else:
1308 else:
1306 # We may not have walked the full directory tree above,
1309 # We may not have walked the full directory tree above,
1307 # so stat and check everything we missed.
1310 # so stat and check everything we missed.
1308 iv = iter(visit)
1311 iv = iter(visit)
1309 for st in util.statfiles([join(i) for i in visit]):
1312 for st in util.statfiles([join(i) for i in visit]):
1310 results[next(iv)] = st
1313 results[next(iv)] = st
1311 return results
1314 return results
1312
1315
1313 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1316 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1314 # Force Rayon (Rust parallelism library) to respect the number of
1317 # Force Rayon (Rust parallelism library) to respect the number of
1315 # workers. This is a temporary workaround until Rust code knows
1318 # workers. This is a temporary workaround until Rust code knows
1316 # how to read the config file.
1319 # how to read the config file.
1317 numcpus = self._ui.configint(b"worker", b"numcpus")
1320 numcpus = self._ui.configint(b"worker", b"numcpus")
1318 if numcpus is not None:
1321 if numcpus is not None:
1319 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1322 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1320
1323
1321 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1324 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1322 if not workers_enabled:
1325 if not workers_enabled:
1323 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1326 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1324
1327
1325 (
1328 (
1326 lookup,
1329 lookup,
1327 modified,
1330 modified,
1328 added,
1331 added,
1329 removed,
1332 removed,
1330 deleted,
1333 deleted,
1331 clean,
1334 clean,
1332 ignored,
1335 ignored,
1333 unknown,
1336 unknown,
1334 warnings,
1337 warnings,
1335 bad,
1338 bad,
1336 traversed,
1339 traversed,
1337 dirty,
1340 dirty,
1338 ) = rustmod.status(
1341 ) = rustmod.status(
1339 self._map._rustmap,
1342 self._map._rustmap,
1340 matcher,
1343 matcher,
1341 self._rootdir,
1344 self._rootdir,
1342 self._ignorefiles(),
1345 self._ignorefiles(),
1343 self._checkexec,
1346 self._checkexec,
1344 self._lastnormaltime,
1347 self._lastnormaltime,
1345 bool(list_clean),
1348 bool(list_clean),
1346 bool(list_ignored),
1349 bool(list_ignored),
1347 bool(list_unknown),
1350 bool(list_unknown),
1348 bool(matcher.traversedir),
1351 bool(matcher.traversedir),
1349 )
1352 )
1350
1353
1351 self._dirty |= dirty
1354 self._dirty |= dirty
1352
1355
1353 if matcher.traversedir:
1356 if matcher.traversedir:
1354 for dir in traversed:
1357 for dir in traversed:
1355 matcher.traversedir(dir)
1358 matcher.traversedir(dir)
1356
1359
1357 if self._ui.warn:
1360 if self._ui.warn:
1358 for item in warnings:
1361 for item in warnings:
1359 if isinstance(item, tuple):
1362 if isinstance(item, tuple):
1360 file_path, syntax = item
1363 file_path, syntax = item
1361 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1364 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1362 file_path,
1365 file_path,
1363 syntax,
1366 syntax,
1364 )
1367 )
1365 self._ui.warn(msg)
1368 self._ui.warn(msg)
1366 else:
1369 else:
1367 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1370 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1368 self._ui.warn(
1371 self._ui.warn(
1369 msg
1372 msg
1370 % (
1373 % (
1371 pathutil.canonpath(
1374 pathutil.canonpath(
1372 self._rootdir, self._rootdir, item
1375 self._rootdir, self._rootdir, item
1373 ),
1376 ),
1374 b"No such file or directory",
1377 b"No such file or directory",
1375 )
1378 )
1376 )
1379 )
1377
1380
1378 for (fn, message) in bad:
1381 for (fn, message) in bad:
1379 matcher.bad(fn, encoding.strtolocal(message))
1382 matcher.bad(fn, encoding.strtolocal(message))
1380
1383
1381 status = scmutil.status(
1384 status = scmutil.status(
1382 modified=modified,
1385 modified=modified,
1383 added=added,
1386 added=added,
1384 removed=removed,
1387 removed=removed,
1385 deleted=deleted,
1388 deleted=deleted,
1386 unknown=unknown,
1389 unknown=unknown,
1387 ignored=ignored,
1390 ignored=ignored,
1388 clean=clean,
1391 clean=clean,
1389 )
1392 )
1390 return (lookup, status)
1393 return (lookup, status)
1391
1394
1392 def status(self, match, subrepos, ignored, clean, unknown):
1395 def status(self, match, subrepos, ignored, clean, unknown):
1393 """Determine the status of the working copy relative to the
1396 """Determine the status of the working copy relative to the
1394 dirstate and return a pair of (unsure, status), where status is of type
1397 dirstate and return a pair of (unsure, status), where status is of type
1395 scmutil.status and:
1398 scmutil.status and:
1396
1399
1397 unsure:
1400 unsure:
1398 files that might have been modified since the dirstate was
1401 files that might have been modified since the dirstate was
1399 written, but need to be read to be sure (size is the same
1402 written, but need to be read to be sure (size is the same
1400 but mtime differs)
1403 but mtime differs)
1401 status.modified:
1404 status.modified:
1402 files that have definitely been modified since the dirstate
1405 files that have definitely been modified since the dirstate
1403 was written (different size or mode)
1406 was written (different size or mode)
1404 status.clean:
1407 status.clean:
1405 files that have definitely not been modified since the
1408 files that have definitely not been modified since the
1406 dirstate was written
1409 dirstate was written
1407 """
1410 """
1408 listignored, listclean, listunknown = ignored, clean, unknown
1411 listignored, listclean, listunknown = ignored, clean, unknown
1409 lookup, modified, added, unknown, ignored = [], [], [], [], []
1412 lookup, modified, added, unknown, ignored = [], [], [], [], []
1410 removed, deleted, clean = [], [], []
1413 removed, deleted, clean = [], [], []
1411
1414
1412 dmap = self._map
1415 dmap = self._map
1413 dmap.preload()
1416 dmap.preload()
1414
1417
1415 use_rust = True
1418 use_rust = True
1416
1419
1417 allowed_matchers = (
1420 allowed_matchers = (
1418 matchmod.alwaysmatcher,
1421 matchmod.alwaysmatcher,
1419 matchmod.exactmatcher,
1422 matchmod.exactmatcher,
1420 matchmod.includematcher,
1423 matchmod.includematcher,
1421 )
1424 )
1422
1425
1423 if rustmod is None:
1426 if rustmod is None:
1424 use_rust = False
1427 use_rust = False
1425 elif self._checkcase:
1428 elif self._checkcase:
1426 # Case-insensitive filesystems are not handled yet
1429 # Case-insensitive filesystems are not handled yet
1427 use_rust = False
1430 use_rust = False
1428 elif subrepos:
1431 elif subrepos:
1429 use_rust = False
1432 use_rust = False
1430 elif sparse.enabled:
1433 elif sparse.enabled:
1431 use_rust = False
1434 use_rust = False
1432 elif not isinstance(match, allowed_matchers):
1435 elif not isinstance(match, allowed_matchers):
1433 # Some matchers have yet to be implemented
1436 # Some matchers have yet to be implemented
1434 use_rust = False
1437 use_rust = False
1435
1438
1436 if use_rust:
1439 if use_rust:
1437 try:
1440 try:
1438 return self._rust_status(
1441 return self._rust_status(
1439 match, listclean, listignored, listunknown
1442 match, listclean, listignored, listunknown
1440 )
1443 )
1441 except rustmod.FallbackError:
1444 except rustmod.FallbackError:
1442 pass
1445 pass
1443
1446
1444 def noop(f):
1447 def noop(f):
1445 pass
1448 pass
1446
1449
1447 dcontains = dmap.__contains__
1450 dcontains = dmap.__contains__
1448 dget = dmap.__getitem__
1451 dget = dmap.__getitem__
1449 ladd = lookup.append # aka "unsure"
1452 ladd = lookup.append # aka "unsure"
1450 madd = modified.append
1453 madd = modified.append
1451 aadd = added.append
1454 aadd = added.append
1452 uadd = unknown.append if listunknown else noop
1455 uadd = unknown.append if listunknown else noop
1453 iadd = ignored.append if listignored else noop
1456 iadd = ignored.append if listignored else noop
1454 radd = removed.append
1457 radd = removed.append
1455 dadd = deleted.append
1458 dadd = deleted.append
1456 cadd = clean.append if listclean else noop
1459 cadd = clean.append if listclean else noop
1457 mexact = match.exact
1460 mexact = match.exact
1458 dirignore = self._dirignore
1461 dirignore = self._dirignore
1459 checkexec = self._checkexec
1462 checkexec = self._checkexec
1460 copymap = self._map.copymap
1463 copymap = self._map.copymap
1461 lastnormaltime = self._lastnormaltime
1464 lastnormaltime = self._lastnormaltime
1462
1465
1463 # We need to do full walks when either
1466 # We need to do full walks when either
1464 # - we're listing all clean files, or
1467 # - we're listing all clean files, or
1465 # - match.traversedir does something, because match.traversedir should
1468 # - match.traversedir does something, because match.traversedir should
1466 # be called for every dir in the working dir
1469 # be called for every dir in the working dir
1467 full = listclean or match.traversedir is not None
1470 full = listclean or match.traversedir is not None
1468 for fn, st in pycompat.iteritems(
1471 for fn, st in pycompat.iteritems(
1469 self.walk(match, subrepos, listunknown, listignored, full=full)
1472 self.walk(match, subrepos, listunknown, listignored, full=full)
1470 ):
1473 ):
1471 if not dcontains(fn):
1474 if not dcontains(fn):
1472 if (listignored or mexact(fn)) and dirignore(fn):
1475 if (listignored or mexact(fn)) and dirignore(fn):
1473 if listignored:
1476 if listignored:
1474 iadd(fn)
1477 iadd(fn)
1475 else:
1478 else:
1476 uadd(fn)
1479 uadd(fn)
1477 continue
1480 continue
1478
1481
1479 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1482 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1480 # written like that for performance reasons. dmap[fn] is not a
1483 # written like that for performance reasons. dmap[fn] is not a
1481 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1484 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1482 # opcode has fast paths when the value to be unpacked is a tuple or
1485 # opcode has fast paths when the value to be unpacked is a tuple or
1483 # a list, but falls back to creating a full-fledged iterator in
1486 # a list, but falls back to creating a full-fledged iterator in
1484 # general. That is much slower than simply accessing and storing the
1487 # general. That is much slower than simply accessing and storing the
1485 # tuple members one by one.
1488 # tuple members one by one.
1486 t = dget(fn)
1489 t = dget(fn)
1487 mode = t.mode
1490 mode = t.mode
1488 size = t.size
1491 size = t.size
1489 time = t.mtime
1492 time = t.mtime
1490
1493
1491 if not st and t.tracked:
1494 if not st and t.tracked:
1492 dadd(fn)
1495 dadd(fn)
1493 elif t.merged:
1496 elif t.merged:
1494 madd(fn)
1497 madd(fn)
1495 elif t.added:
1498 elif t.added:
1496 aadd(fn)
1499 aadd(fn)
1497 elif t.removed:
1500 elif t.removed:
1498 radd(fn)
1501 radd(fn)
1499 elif t.tracked:
1502 elif t.tracked:
1500 if (
1503 if (
1501 size >= 0
1504 size >= 0
1502 and (
1505 and (
1503 (size != st.st_size and size != st.st_size & _rangemask)
1506 (size != st.st_size and size != st.st_size & _rangemask)
1504 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1507 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1505 )
1508 )
1506 or t.from_p2
1509 or t.from_p2
1507 or fn in copymap
1510 or fn in copymap
1508 ):
1511 ):
1509 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1512 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1510 # issue6456: Size returned may be longer due to
1513 # issue6456: Size returned may be longer due to
1511 # encryption on EXT-4 fscrypt, undecided.
1514 # encryption on EXT-4 fscrypt, undecided.
1512 ladd(fn)
1515 ladd(fn)
1513 else:
1516 else:
1514 madd(fn)
1517 madd(fn)
1515 elif (
1518 elif (
1516 time != st[stat.ST_MTIME]
1519 time != st[stat.ST_MTIME]
1517 and time != st[stat.ST_MTIME] & _rangemask
1520 and time != st[stat.ST_MTIME] & _rangemask
1518 ):
1521 ):
1519 ladd(fn)
1522 ladd(fn)
1520 elif st[stat.ST_MTIME] == lastnormaltime:
1523 elif st[stat.ST_MTIME] == lastnormaltime:
1521 # fn may have just been marked as normal and it may have
1524 # fn may have just been marked as normal and it may have
1522 # changed in the same second without changing its size.
1525 # changed in the same second without changing its size.
1523 # This can happen if we quickly do multiple commits.
1526 # This can happen if we quickly do multiple commits.
1524 # Force lookup, so we don't miss such a racy file change.
1527 # Force lookup, so we don't miss such a racy file change.
1525 ladd(fn)
1528 ladd(fn)
1526 elif listclean:
1529 elif listclean:
1527 cadd(fn)
1530 cadd(fn)
1528 status = scmutil.status(
1531 status = scmutil.status(
1529 modified, added, removed, deleted, unknown, ignored, clean
1532 modified, added, removed, deleted, unknown, ignored, clean
1530 )
1533 )
1531 return (lookup, status)
1534 return (lookup, status)
1532
1535
1533 def matches(self, match):
1536 def matches(self, match):
1534 """
1537 """
1535 return files in the dirstate (in whatever state) filtered by match
1538 return files in the dirstate (in whatever state) filtered by match
1536 """
1539 """
1537 dmap = self._map
1540 dmap = self._map
1538 if rustmod is not None:
1541 if rustmod is not None:
1539 dmap = self._map._rustmap
1542 dmap = self._map._rustmap
1540
1543
1541 if match.always():
1544 if match.always():
1542 return dmap.keys()
1545 return dmap.keys()
1543 files = match.files()
1546 files = match.files()
1544 if match.isexact():
1547 if match.isexact():
1545 # fast path -- filter the other way around, since typically files is
1548 # fast path -- filter the other way around, since typically files is
1546 # much smaller than dmap
1549 # much smaller than dmap
1547 return [f for f in files if f in dmap]
1550 return [f for f in files if f in dmap]
1548 if match.prefix() and all(fn in dmap for fn in files):
1551 if match.prefix() and all(fn in dmap for fn in files):
1549 # fast path -- all the values are known to be files, so just return
1552 # fast path -- all the values are known to be files, so just return
1550 # that
1553 # that
1551 return list(files)
1554 return list(files)
1552 return [f for f in dmap if match(f)]
1555 return [f for f in dmap if match(f)]
1553
1556
1554 def _actualfilename(self, tr):
1557 def _actualfilename(self, tr):
1555 if tr:
1558 if tr:
1556 return self._pendingfilename
1559 return self._pendingfilename
1557 else:
1560 else:
1558 return self._filename
1561 return self._filename
1559
1562
1560 def savebackup(self, tr, backupname):
1563 def savebackup(self, tr, backupname):
1561 '''Save current dirstate into backup file'''
1564 '''Save current dirstate into backup file'''
1562 filename = self._actualfilename(tr)
1565 filename = self._actualfilename(tr)
1563 assert backupname != filename
1566 assert backupname != filename
1564
1567
1565 # use '_writedirstate' instead of 'write' to write changes certainly,
1568 # use '_writedirstate' instead of 'write' to write changes certainly,
1566 # because the latter omits writing out if transaction is running.
1569 # because the latter omits writing out if transaction is running.
1567 # output file will be used to create backup of dirstate at this point.
1570 # output file will be used to create backup of dirstate at this point.
1568 if self._dirty or not self._opener.exists(filename):
1571 if self._dirty or not self._opener.exists(filename):
1569 self._writedirstate(
1572 self._writedirstate(
1570 tr,
1573 tr,
1571 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1574 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1572 )
1575 )
1573
1576
1574 if tr:
1577 if tr:
1575 # ensure that subsequent tr.writepending returns True for
1578 # ensure that subsequent tr.writepending returns True for
1576 # changes written out above, even if dirstate is never
1579 # changes written out above, even if dirstate is never
1577 # changed after this
1580 # changed after this
1578 tr.addfilegenerator(
1581 tr.addfilegenerator(
1579 b'dirstate',
1582 b'dirstate',
1580 (self._filename,),
1583 (self._filename,),
1581 lambda f: self._writedirstate(tr, f),
1584 lambda f: self._writedirstate(tr, f),
1582 location=b'plain',
1585 location=b'plain',
1583 )
1586 )
1584
1587
1585 # ensure that pending file written above is unlinked at
1588 # ensure that pending file written above is unlinked at
1586 # failure, even if tr.writepending isn't invoked until the
1589 # failure, even if tr.writepending isn't invoked until the
1587 # end of this transaction
1590 # end of this transaction
1588 tr.registertmp(filename, location=b'plain')
1591 tr.registertmp(filename, location=b'plain')
1589
1592
1590 self._opener.tryunlink(backupname)
1593 self._opener.tryunlink(backupname)
1591 # hardlink backup is okay because _writedirstate is always called
1594 # hardlink backup is okay because _writedirstate is always called
1592 # with an "atomictemp=True" file.
1595 # with an "atomictemp=True" file.
1593 util.copyfile(
1596 util.copyfile(
1594 self._opener.join(filename),
1597 self._opener.join(filename),
1595 self._opener.join(backupname),
1598 self._opener.join(backupname),
1596 hardlink=True,
1599 hardlink=True,
1597 )
1600 )
1598
1601
1599 def restorebackup(self, tr, backupname):
1602 def restorebackup(self, tr, backupname):
1600 '''Restore dirstate by backup file'''
1603 '''Restore dirstate by backup file'''
1601 # this "invalidate()" prevents "wlock.release()" from writing
1604 # this "invalidate()" prevents "wlock.release()" from writing
1602 # changes of dirstate out after restoring from backup file
1605 # changes of dirstate out after restoring from backup file
1603 self.invalidate()
1606 self.invalidate()
1604 filename = self._actualfilename(tr)
1607 filename = self._actualfilename(tr)
1605 o = self._opener
1608 o = self._opener
1606 if util.samefile(o.join(backupname), o.join(filename)):
1609 if util.samefile(o.join(backupname), o.join(filename)):
1607 o.unlink(backupname)
1610 o.unlink(backupname)
1608 else:
1611 else:
1609 o.rename(backupname, filename, checkambig=True)
1612 o.rename(backupname, filename, checkambig=True)
1610
1613
1611 def clearbackup(self, tr, backupname):
1614 def clearbackup(self, tr, backupname):
1612 '''Clear backup file'''
1615 '''Clear backup file'''
1613 self._opener.unlink(backupname)
1616 self._opener.unlink(backupname)
@@ -1,116 +1,116
1 $ hg init
1 $ hg init
2
2
3 $ echo foo > foo
3 $ echo foo > foo
4 $ echo bar > bar
4 $ echo bar > bar
5 $ hg ci -qAm 'add foo bar'
5 $ hg ci -qAm 'add foo bar'
6
6
7 $ echo foo2 >> foo
7 $ echo foo2 >> foo
8 $ echo bleh > bar
8 $ echo bleh > bar
9 $ hg ci -m 'change foo bar'
9 $ hg ci -m 'change foo bar'
10
10
11 $ hg up -qC 0
11 $ hg up -qC 0
12 $ hg mv foo foo1
12 $ hg mv foo foo1
13 $ echo foo1 > foo1
13 $ echo foo1 > foo1
14 $ hg cat foo >> foo1
14 $ hg cat foo >> foo1
15 $ hg ci -m 'mv foo foo1'
15 $ hg ci -m 'mv foo foo1'
16 created new head
16 created new head
17
17
18 $ hg merge
18 $ hg merge
19 merging foo1 and foo to foo1
19 merging foo1 and foo to foo1
20 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
20 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
21 (branch merge, don't forget to commit)
21 (branch merge, don't forget to commit)
22
22
23 $ hg debugstate --no-dates
23 $ hg debugstate --no-dates
24 m 0 -2 unset bar
24 m 0 -2 unset bar
25 m 0 -2 unset foo1
25 m 0 -2 unset foo1
26 copy: foo -> foo1
26 copy: foo -> foo1
27
27
28 $ hg st -q
28 $ hg st -q
29 M bar
29 M bar
30 M foo1
30 M foo1
31
31
32
32
33 Removing foo1 and bar:
33 Removing foo1 and bar:
34
34
35 $ cp foo1 F
35 $ cp foo1 F
36 $ cp bar B
36 $ cp bar B
37 $ hg rm -f foo1 bar
37 $ hg rm -f foo1 bar
38
38
39 $ hg debugstate --no-dates
39 $ hg debugstate --no-dates
40 r 0 -1 set bar
40 r 0 -1 set bar
41 r 0 -1 set foo1
41 r 0 -1 set foo1
42 copy: foo -> foo1
42 copy: foo -> foo1
43
43
44 $ hg st -qC
44 $ hg st -qC
45 R bar
45 R bar
46 R foo1
46 R foo1
47
47
48
48
49 Re-adding foo1 and bar:
49 Re-adding foo1 and bar:
50
50
51 $ cp F foo1
51 $ cp F foo1
52 $ cp B bar
52 $ cp B bar
53 $ hg add -v foo1 bar
53 $ hg add -v foo1 bar
54 adding bar
54 adding bar
55 adding foo1
55 adding foo1
56
56
57 $ hg debugstate --no-dates
57 $ hg debugstate --no-dates
58 n 0 -2 unset bar
58 m 0 -2 unset bar
59 n 0 -2 unset foo1
59 m 0 -2 unset foo1
60 copy: foo -> foo1
60 copy: foo -> foo1
61
61
62 $ hg st -qC
62 $ hg st -qC
63 M bar
63 M bar
64 M foo1
64 M foo1
65 foo
65 foo
66
66
67
67
68 Reverting foo1 and bar:
68 Reverting foo1 and bar:
69
69
70 $ hg revert -vr . foo1 bar
70 $ hg revert -vr . foo1 bar
71 saving current version of bar as bar.orig
71 saving current version of bar as bar.orig
72 saving current version of foo1 as foo1.orig
72 saving current version of foo1 as foo1.orig
73 reverting bar
73 reverting bar
74 reverting foo1
74 reverting foo1
75
75
76 $ hg debugstate --no-dates
76 $ hg debugstate --no-dates
77 n 0 -2 unset bar
77 m 0 -2 unset bar
78 n 0 -2 unset foo1
78 m 0 -2 unset foo1
79 copy: foo -> foo1
79 copy: foo -> foo1
80
80
81 $ hg st -qC
81 $ hg st -qC
82 M bar
82 M bar
83 M foo1
83 M foo1
84 foo
84 foo
85
85
86 $ hg diff
86 $ hg diff
87
87
88 Merge should not overwrite local file that is untracked after remove
88 Merge should not overwrite local file that is untracked after remove
89
89
90 $ rm *
90 $ rm *
91 $ hg up -qC
91 $ hg up -qC
92 $ hg rm bar
92 $ hg rm bar
93 $ hg ci -m 'remove bar'
93 $ hg ci -m 'remove bar'
94 $ echo 'memories of buried pirate treasure' > bar
94 $ echo 'memories of buried pirate treasure' > bar
95 $ hg merge
95 $ hg merge
96 bar: untracked file differs
96 bar: untracked file differs
97 abort: untracked files in working directory differ from files in requested revision
97 abort: untracked files in working directory differ from files in requested revision
98 [20]
98 [20]
99 $ cat bar
99 $ cat bar
100 memories of buried pirate treasure
100 memories of buried pirate treasure
101
101
102 Those who use force will lose
102 Those who use force will lose
103
103
104 $ hg merge -f
104 $ hg merge -f
105 file 'bar' was deleted in local [working copy] but was modified in other [merge rev].
105 file 'bar' was deleted in local [working copy] but was modified in other [merge rev].
106 You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
106 You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
107 What do you want to do? u
107 What do you want to do? u
108 merging foo1 and foo to foo1
108 merging foo1 and foo to foo1
109 0 files updated, 1 files merged, 0 files removed, 1 files unresolved
109 0 files updated, 1 files merged, 0 files removed, 1 files unresolved
110 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
110 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
111 [1]
111 [1]
112 $ cat bar
112 $ cat bar
113 bleh
113 bleh
114 $ hg st
114 $ hg st
115 M bar
115 M bar
116 M foo1
116 M foo1
General Comments 0
You need to be logged in to leave comments. Login now