##// END OF EJS Templates
dirstate: deprecate `dirstate.remove` in all cases...
marmoute -
r48501:0e87c90f default
parent child Browse files
Show More
@@ -1,1672 +1,1679 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """A ``filecache`` specialized for files stored under ``.hg/``."""

    def join(self, obj, fname):
        # Resolve fname through the repository's .hg opener.
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """A ``filecache`` specialized for files in the repository root."""

    def join(self, obj, fname):
        # Resolve fname relative to the working-directory root.
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator enforcing that *func* runs inside a parentchange context.

    Raises ``error.ProgrammingError`` when the wrapped method is invoked
    while ``self.pendingparentchange()`` is false.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator enforcing that *func* runs OUTSIDE a parentchange context.

    Raises ``error.ProgrammingError`` when the wrapped method is invoked
    while ``self.pendingparentchange()`` is true.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._updatedfiles = set()
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
242 if self._checklink and self._checkexec:
243
243
244 def f(x):
244 def f(x):
245 try:
245 try:
246 st = os.lstat(self._join(x))
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
247 if util.statislink(st):
248 return b'l'
248 return b'l'
249 if util.statisexec(st):
249 if util.statisexec(st):
250 return b'x'
250 return b'x'
251 except OSError:
251 except OSError:
252 pass
252 pass
253 return b''
253 return b''
254
254
255 return f
255 return f
256
256
257 fallback = buildfallback()
257 fallback = buildfallback()
258 if self._checklink:
258 if self._checklink:
259
259
260 def f(x):
260 def f(x):
261 if os.path.islink(self._join(x)):
261 if os.path.islink(self._join(x)):
262 return b'l'
262 return b'l'
263 if b'x' in fallback(x):
263 if b'x' in fallback(x):
264 return b'x'
264 return b'x'
265 return b''
265 return b''
266
266
267 return f
267 return f
268 if self._checkexec:
268 if self._checkexec:
269
269
270 def f(x):
270 def f(x):
271 if b'l' in fallback(x):
271 if b'l' in fallback(x):
272 return b'l'
272 return b'l'
273 if util.isexec(self._join(x)):
273 if util.isexec(self._join(x)):
274 return b'x'
274 return b'x'
275 return b''
275 return b''
276
276
277 return f
277 return f
278 else:
278 else:
279 return fallback
279 return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries a
370 When moving from two parents to one, "merged" entries a
371 adjusted to normal and previous copy records discarded and
371 adjusted to normal and previous copy records discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 self._map.setparents(p1, p2)
388 self._map.setparents(p1, p2)
389 copies = {}
389 copies = {}
390 if (
390 if (
391 oldp2 != self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
395
396 for f in candidatefiles:
396 for f in candidatefiles:
397 s = self._map.get(f)
397 s = self._map.get(f)
398 if s is None:
398 if s is None:
399 continue
399 continue
400
400
401 # Discard "merged" markers when moving away from a merge state
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
402 if s.merged:
403 source = self._map.copymap.get(f)
403 source = self._map.copymap.get(f)
404 if source:
404 if source:
405 copies[f] = source
405 copies[f] = source
406 self.normallookup(f)
406 self.normallookup(f)
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._add(f)
413 return copies
413 return copies
414
414
415 def setbranch(self, branch):
415 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 try:
418 try:
419 f.write(self._branch + b'\n')
419 f.write(self._branch + b'\n')
420 f.close()
420 f.close()
421
421
422 # make sure filecache has the correct stat info for _branch after
422 # make sure filecache has the correct stat info for _branch after
423 # replacing the underlying file
423 # replacing the underlying file
424 ce = self._filecache[b'_branch']
424 ce = self._filecache[b'_branch']
425 if ce:
425 if ce:
426 ce.refresh()
426 ce.refresh()
427 except: # re-raises
427 except: # re-raises
428 f.discard()
428 f.discard()
429 raise
429 raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of "update/merge" case. For
469 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True the file was previously untracked, False otherwise.
472 return True the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self.normallookup(filename)
479 self.normallookup(filename)
480 return True
480 return True
481 return False
481 return False
482
482
483 @requires_no_parents_change
483 @requires_no_parents_change
484 def set_untracked(self, filename):
484 def set_untracked(self, filename):
485 """a "public" method for generic code to mark a file as untracked
485 """a "public" method for generic code to mark a file as untracked
486
486
487 This function is to be called outside of "update/merge" case. For
487 This function is to be called outside of "update/merge" case. For
488 example by a command like `hg remove X`.
488 example by a command like `hg remove X`.
489
489
490 return True the file was previously tracked, False otherwise.
490 return True the file was previously tracked, False otherwise.
491 """
491 """
492 entry = self._map.get(filename)
492 entry = self._map.get(filename)
493 if entry is None:
493 if entry is None:
494 return False
494 return False
495 elif entry.added:
495 elif entry.added:
496 self._drop(filename)
496 self._drop(filename)
497 return True
497 return True
498 else:
498 else:
499 self._remove(filename)
499 self._remove(filename)
500 return True
500 return True
501
501
502 @requires_parents_change
502 @requires_parents_change
503 def update_file_p1(
503 def update_file_p1(
504 self,
504 self,
505 filename,
505 filename,
506 p1_tracked,
506 p1_tracked,
507 ):
507 ):
508 """Set a file as tracked in the parent (or not)
508 """Set a file as tracked in the parent (or not)
509
509
510 This is to be called when adjust the dirstate to a new parent after an history
510 This is to be called when adjust the dirstate to a new parent after an history
511 rewriting operation.
511 rewriting operation.
512
512
513 It should not be called during a merge (p2 != nullid) and only within
513 It should not be called during a merge (p2 != nullid) and only within
514 a `with dirstate.parentchange():` context.
514 a `with dirstate.parentchange():` context.
515 """
515 """
516 if self.in_merge:
516 if self.in_merge:
517 msg = b'update_file_reference should not be called when merging'
517 msg = b'update_file_reference should not be called when merging'
518 raise error.ProgrammingError(msg)
518 raise error.ProgrammingError(msg)
519 entry = self._map.get(filename)
519 entry = self._map.get(filename)
520 if entry is None:
520 if entry is None:
521 wc_tracked = False
521 wc_tracked = False
522 else:
522 else:
523 wc_tracked = entry.tracked
523 wc_tracked = entry.tracked
524 possibly_dirty = False
524 possibly_dirty = False
525 if p1_tracked and wc_tracked:
525 if p1_tracked and wc_tracked:
526 # the underlying reference might have changed, we will have to
526 # the underlying reference might have changed, we will have to
527 # check it.
527 # check it.
528 possibly_dirty = True
528 possibly_dirty = True
529 elif not (p1_tracked or wc_tracked):
529 elif not (p1_tracked or wc_tracked):
530 # the file is no longer relevant to anyone
530 # the file is no longer relevant to anyone
531 self._drop(filename)
531 self._drop(filename)
532 elif (not p1_tracked) and wc_tracked:
532 elif (not p1_tracked) and wc_tracked:
533 if entry is not None and entry.added:
533 if entry is not None and entry.added:
534 return # avoid dropping copy information (maybe?)
534 return # avoid dropping copy information (maybe?)
535 elif p1_tracked and not wc_tracked:
535 elif p1_tracked and not wc_tracked:
536 pass
536 pass
537 else:
537 else:
538 assert False, 'unreachable'
538 assert False, 'unreachable'
539
539
540 # this mean we are doing call for file we do not really care about the
540 # this mean we are doing call for file we do not really care about the
541 # data (eg: added or removed), however this should be a minor overhead
541 # data (eg: added or removed), however this should be a minor overhead
542 # compared to the overall update process calling this.
542 # compared to the overall update process calling this.
543 parentfiledata = None
543 parentfiledata = None
544 if wc_tracked:
544 if wc_tracked:
545 parentfiledata = self._get_filedata(filename)
545 parentfiledata = self._get_filedata(filename)
546
546
547 self._updatedfiles.add(filename)
547 self._updatedfiles.add(filename)
548 self._map.reset_state(
548 self._map.reset_state(
549 filename,
549 filename,
550 wc_tracked,
550 wc_tracked,
551 p1_tracked,
551 p1_tracked,
552 possibly_dirty=possibly_dirty,
552 possibly_dirty=possibly_dirty,
553 parentfiledata=parentfiledata,
553 parentfiledata=parentfiledata,
554 )
554 )
555 if (
555 if (
556 parentfiledata is not None
556 parentfiledata is not None
557 and parentfiledata[2] > self._lastnormaltime
557 and parentfiledata[2] > self._lastnormaltime
558 ):
558 ):
559 # Remember the most recent modification timeslot for status(),
559 # Remember the most recent modification timeslot for status(),
560 # to make sure we won't miss future size-preserving file content
560 # to make sure we won't miss future size-preserving file content
561 # modifications that happen within the same timeslot.
561 # modifications that happen within the same timeslot.
562 self._lastnormaltime = parentfiledata[2]
562 self._lastnormaltime = parentfiledata[2]
563
563
564 @requires_parents_change
564 @requires_parents_change
565 def update_file(
565 def update_file(
566 self,
566 self,
567 filename,
567 filename,
568 wc_tracked,
568 wc_tracked,
569 p1_tracked,
569 p1_tracked,
570 p2_tracked=False,
570 p2_tracked=False,
571 merged=False,
571 merged=False,
572 clean_p1=False,
572 clean_p1=False,
573 clean_p2=False,
573 clean_p2=False,
574 possibly_dirty=False,
574 possibly_dirty=False,
575 parentfiledata=None,
575 parentfiledata=None,
576 ):
576 ):
577 """update the information about a file in the dirstate
577 """update the information about a file in the dirstate
578
578
579 This is to be called when the direstates parent changes to keep track
579 This is to be called when the direstates parent changes to keep track
580 of what is the file situation in regards to the working copy and its parent.
580 of what is the file situation in regards to the working copy and its parent.
581
581
582 This function must be called within a `dirstate.parentchange` context.
582 This function must be called within a `dirstate.parentchange` context.
583
583
584 note: the API is at an early stage and we might need to ajust it
584 note: the API is at an early stage and we might need to ajust it
585 depending of what information ends up being relevant and useful to
585 depending of what information ends up being relevant and useful to
586 other processing.
586 other processing.
587 """
587 """
588 if merged and (clean_p1 or clean_p2):
588 if merged and (clean_p1 or clean_p2):
589 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
589 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
590 raise error.ProgrammingError(msg)
590 raise error.ProgrammingError(msg)
591
591
592 # note: I do not think we need to double check name clash here since we
592 # note: I do not think we need to double check name clash here since we
593 # are in a update/merge case that should already have taken care of
593 # are in a update/merge case that should already have taken care of
594 # this. The test agrees
594 # this. The test agrees
595
595
596 self._dirty = True
596 self._dirty = True
597 self._updatedfiles.add(filename)
597 self._updatedfiles.add(filename)
598
598
599 need_parent_file_data = (
599 need_parent_file_data = (
600 not (possibly_dirty or clean_p2 or merged)
600 not (possibly_dirty or clean_p2 or merged)
601 and wc_tracked
601 and wc_tracked
602 and p1_tracked
602 and p1_tracked
603 )
603 )
604
604
605 # this mean we are doing call for file we do not really care about the
605 # this mean we are doing call for file we do not really care about the
606 # data (eg: added or removed), however this should be a minor overhead
606 # data (eg: added or removed), however this should be a minor overhead
607 # compared to the overall update process calling this.
607 # compared to the overall update process calling this.
608 if need_parent_file_data:
608 if need_parent_file_data:
609 if parentfiledata is None:
609 if parentfiledata is None:
610 parentfiledata = self._get_filedata(filename)
610 parentfiledata = self._get_filedata(filename)
611 mtime = parentfiledata[2]
611 mtime = parentfiledata[2]
612
612
613 if mtime > self._lastnormaltime:
613 if mtime > self._lastnormaltime:
614 # Remember the most recent modification timeslot for
614 # Remember the most recent modification timeslot for
615 # status(), to make sure we won't miss future
615 # status(), to make sure we won't miss future
616 # size-preserving file content modifications that happen
616 # size-preserving file content modifications that happen
617 # within the same timeslot.
617 # within the same timeslot.
618 self._lastnormaltime = mtime
618 self._lastnormaltime = mtime
619
619
620 self._map.reset_state(
620 self._map.reset_state(
621 filename,
621 filename,
622 wc_tracked,
622 wc_tracked,
623 p1_tracked,
623 p1_tracked,
624 p2_tracked=p2_tracked,
624 p2_tracked=p2_tracked,
625 merged=merged,
625 merged=merged,
626 clean_p1=clean_p1,
626 clean_p1=clean_p1,
627 clean_p2=clean_p2,
627 clean_p2=clean_p2,
628 possibly_dirty=possibly_dirty,
628 possibly_dirty=possibly_dirty,
629 parentfiledata=parentfiledata,
629 parentfiledata=parentfiledata,
630 )
630 )
631 if (
631 if (
632 parentfiledata is not None
632 parentfiledata is not None
633 and parentfiledata[2] > self._lastnormaltime
633 and parentfiledata[2] > self._lastnormaltime
634 ):
634 ):
635 # Remember the most recent modification timeslot for status(),
635 # Remember the most recent modification timeslot for status(),
636 # to make sure we won't miss future size-preserving file content
636 # to make sure we won't miss future size-preserving file content
637 # modifications that happen within the same timeslot.
637 # modifications that happen within the same timeslot.
638 self._lastnormaltime = parentfiledata[2]
638 self._lastnormaltime = parentfiledata[2]
639
639
640 def _addpath(
640 def _addpath(
641 self,
641 self,
642 f,
642 f,
643 mode=0,
643 mode=0,
644 size=None,
644 size=None,
645 mtime=None,
645 mtime=None,
646 added=False,
646 added=False,
647 merged=False,
647 merged=False,
648 from_p2=False,
648 from_p2=False,
649 possibly_dirty=False,
649 possibly_dirty=False,
650 ):
650 ):
651 entry = self._map.get(f)
651 entry = self._map.get(f)
652 if added or entry is not None and entry.removed:
652 if added or entry is not None and entry.removed:
653 scmutil.checkfilename(f)
653 scmutil.checkfilename(f)
654 if self._map.hastrackeddir(f):
654 if self._map.hastrackeddir(f):
655 msg = _(b'directory %r already in dirstate')
655 msg = _(b'directory %r already in dirstate')
656 msg %= pycompat.bytestr(f)
656 msg %= pycompat.bytestr(f)
657 raise error.Abort(msg)
657 raise error.Abort(msg)
658 # shadows
658 # shadows
659 for d in pathutil.finddirs(f):
659 for d in pathutil.finddirs(f):
660 if self._map.hastrackeddir(d):
660 if self._map.hastrackeddir(d):
661 break
661 break
662 entry = self._map.get(d)
662 entry = self._map.get(d)
663 if entry is not None and not entry.removed:
663 if entry is not None and not entry.removed:
664 msg = _(b'file %r in dirstate clashes with %r')
664 msg = _(b'file %r in dirstate clashes with %r')
665 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
665 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
666 raise error.Abort(msg)
666 raise error.Abort(msg)
667 self._dirty = True
667 self._dirty = True
668 self._updatedfiles.add(f)
668 self._updatedfiles.add(f)
669 self._map.addfile(
669 self._map.addfile(
670 f,
670 f,
671 mode=mode,
671 mode=mode,
672 size=size,
672 size=size,
673 mtime=mtime,
673 mtime=mtime,
674 added=added,
674 added=added,
675 merged=merged,
675 merged=merged,
676 from_p2=from_p2,
676 from_p2=from_p2,
677 possibly_dirty=possibly_dirty,
677 possibly_dirty=possibly_dirty,
678 )
678 )
679
679
680 def _get_filedata(self, filename):
680 def _get_filedata(self, filename):
681 """returns"""
681 """returns"""
682 s = os.lstat(self._join(filename))
682 s = os.lstat(self._join(filename))
683 mode = s.st_mode
683 mode = s.st_mode
684 size = s.st_size
684 size = s.st_size
685 mtime = s[stat.ST_MTIME]
685 mtime = s[stat.ST_MTIME]
686 return (mode, size, mtime)
686 return (mode, size, mtime)
687
687
688 def normal(self, f, parentfiledata=None):
688 def normal(self, f, parentfiledata=None):
689 """Mark a file normal and clean.
689 """Mark a file normal and clean.
690
690
691 parentfiledata: (mode, size, mtime) of the clean file
691 parentfiledata: (mode, size, mtime) of the clean file
692
692
693 parentfiledata should be computed from memory (for mode,
693 parentfiledata should be computed from memory (for mode,
694 size), as or close as possible from the point where we
694 size), as or close as possible from the point where we
695 determined the file was clean, to limit the risk of the
695 determined the file was clean, to limit the risk of the
696 file having been changed by an external process between the
696 file having been changed by an external process between the
697 moment where the file was determined to be clean and now."""
697 moment where the file was determined to be clean and now."""
698 if parentfiledata:
698 if parentfiledata:
699 (mode, size, mtime) = parentfiledata
699 (mode, size, mtime) = parentfiledata
700 else:
700 else:
701 (mode, size, mtime) = self._get_filedata(f)
701 (mode, size, mtime) = self._get_filedata(f)
702 self._addpath(f, mode=mode, size=size, mtime=mtime)
702 self._addpath(f, mode=mode, size=size, mtime=mtime)
703 self._map.copymap.pop(f, None)
703 self._map.copymap.pop(f, None)
704 if f in self._map.nonnormalset:
704 if f in self._map.nonnormalset:
705 self._map.nonnormalset.remove(f)
705 self._map.nonnormalset.remove(f)
706 if mtime > self._lastnormaltime:
706 if mtime > self._lastnormaltime:
707 # Remember the most recent modification timeslot for status(),
707 # Remember the most recent modification timeslot for status(),
708 # to make sure we won't miss future size-preserving file content
708 # to make sure we won't miss future size-preserving file content
709 # modifications that happen within the same timeslot.
709 # modifications that happen within the same timeslot.
710 self._lastnormaltime = mtime
710 self._lastnormaltime = mtime
711
711
712 def normallookup(self, f):
712 def normallookup(self, f):
713 '''Mark a file normal, but possibly dirty.'''
713 '''Mark a file normal, but possibly dirty.'''
714 if self.in_merge:
714 if self.in_merge:
715 # if there is a merge going on and the file was either
715 # if there is a merge going on and the file was either
716 # "merged" or coming from other parent (-2) before
716 # "merged" or coming from other parent (-2) before
717 # being removed, restore that state.
717 # being removed, restore that state.
718 entry = self._map.get(f)
718 entry = self._map.get(f)
719 if entry is not None:
719 if entry is not None:
720 # XXX this should probably be dealt with a a lower level
720 # XXX this should probably be dealt with a a lower level
721 # (see `merged_removed` and `from_p2_removed`)
721 # (see `merged_removed` and `from_p2_removed`)
722 if entry.merged_removed or entry.from_p2_removed:
722 if entry.merged_removed or entry.from_p2_removed:
723 source = self._map.copymap.get(f)
723 source = self._map.copymap.get(f)
724 if entry.merged_removed:
724 if entry.merged_removed:
725 self.merge(f)
725 self.merge(f)
726 elif entry.from_p2_removed:
726 elif entry.from_p2_removed:
727 self.otherparent(f)
727 self.otherparent(f)
728 if source is not None:
728 if source is not None:
729 self.copy(source, f)
729 self.copy(source, f)
730 return
730 return
731 elif entry.merged or entry.from_p2:
731 elif entry.merged or entry.from_p2:
732 return
732 return
733 self._addpath(f, possibly_dirty=True)
733 self._addpath(f, possibly_dirty=True)
734 self._map.copymap.pop(f, None)
734 self._map.copymap.pop(f, None)
735
735
736 def otherparent(self, f):
736 def otherparent(self, f):
737 '''Mark as coming from the other parent, always dirty.'''
737 '''Mark as coming from the other parent, always dirty.'''
738 if not self.in_merge:
738 if not self.in_merge:
739 msg = _(b"setting %r to other parent only allowed in merges") % f
739 msg = _(b"setting %r to other parent only allowed in merges") % f
740 raise error.Abort(msg)
740 raise error.Abort(msg)
741 entry = self._map.get(f)
741 entry = self._map.get(f)
742 if entry is not None and entry.tracked:
742 if entry is not None and entry.tracked:
743 # merge-like
743 # merge-like
744 self._addpath(f, merged=True)
744 self._addpath(f, merged=True)
745 else:
745 else:
746 # add-like
746 # add-like
747 self._addpath(f, from_p2=True)
747 self._addpath(f, from_p2=True)
748 self._map.copymap.pop(f, None)
748 self._map.copymap.pop(f, None)
749
749
750 def add(self, f):
750 def add(self, f):
751 '''Mark a file added.'''
751 '''Mark a file added.'''
752 if not self.pendingparentchange():
752 if not self.pendingparentchange():
753 util.nouideprecwarn(
753 util.nouideprecwarn(
754 b"do not use `add` outside of update/merge context."
754 b"do not use `add` outside of update/merge context."
755 b" Use `set_tracked`",
755 b" Use `set_tracked`",
756 b'6.0',
756 b'6.0',
757 stacklevel=2,
757 stacklevel=2,
758 )
758 )
759 self._add(f)
759 self._add(f)
760
760
761 def _add(self, filename):
761 def _add(self, filename):
762 """internal function to mark a file as added"""
762 """internal function to mark a file as added"""
763 self._addpath(filename, added=True)
763 self._addpath(filename, added=True)
764 self._map.copymap.pop(filename, None)
764 self._map.copymap.pop(filename, None)
765
765
766 def remove(self, f):
766 def remove(self, f):
767 '''Mark a file removed'''
767 '''Mark a file removed'''
768 if not self.pendingparentchange():
768 if self.pendingparentchange():
769 util.nouideprecwarn(
770 b"do not use `remove` insde of update/merge context."
771 b" Use `update_file` or `update_file_p1`",
772 b'6.0',
773 stacklevel=2,
774 )
775 else:
769 util.nouideprecwarn(
776 util.nouideprecwarn(
770 b"do not use `remove` outside of update/merge context."
777 b"do not use `remove` outside of update/merge context."
771 b" Use `set_untracked`",
778 b" Use `set_untracked`",
772 b'6.0',
779 b'6.0',
773 stacklevel=2,
780 stacklevel=2,
774 )
781 )
775 self._remove(f)
782 self._remove(f)
776
783
777 def _remove(self, filename):
784 def _remove(self, filename):
778 """internal function to mark a file removed"""
785 """internal function to mark a file removed"""
779 self._dirty = True
786 self._dirty = True
780 self._updatedfiles.add(filename)
787 self._updatedfiles.add(filename)
781 self._map.removefile(filename, in_merge=self.in_merge)
788 self._map.removefile(filename, in_merge=self.in_merge)
782
789
783 def merge(self, f):
790 def merge(self, f):
784 '''Mark a file merged.'''
791 '''Mark a file merged.'''
785 if not self.in_merge:
792 if not self.in_merge:
786 return self.normallookup(f)
793 return self.normallookup(f)
787 return self.otherparent(f)
794 return self.otherparent(f)
788
795
789 def drop(self, f):
796 def drop(self, f):
790 '''Drop a file from the dirstate'''
797 '''Drop a file from the dirstate'''
791 if not self.pendingparentchange():
798 if not self.pendingparentchange():
792 util.nouideprecwarn(
799 util.nouideprecwarn(
793 b"do not use `drop` outside of update/merge context."
800 b"do not use `drop` outside of update/merge context."
794 b" Use `set_untracked`",
801 b" Use `set_untracked`",
795 b'6.0',
802 b'6.0',
796 stacklevel=2,
803 stacklevel=2,
797 )
804 )
798 self._drop(f)
805 self._drop(f)
799
806
800 def _drop(self, filename):
807 def _drop(self, filename):
801 """internal function to drop a file from the dirstate"""
808 """internal function to drop a file from the dirstate"""
802 if self._map.dropfile(filename):
809 if self._map.dropfile(filename):
803 self._dirty = True
810 self._dirty = True
804 self._updatedfiles.add(filename)
811 self._updatedfiles.add(filename)
805 self._map.copymap.pop(filename, None)
812 self._map.copymap.pop(filename, None)
806
813
807 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
814 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
808 if exists is None:
815 if exists is None:
809 exists = os.path.lexists(os.path.join(self._root, path))
816 exists = os.path.lexists(os.path.join(self._root, path))
810 if not exists:
817 if not exists:
811 # Maybe a path component exists
818 # Maybe a path component exists
812 if not ignoremissing and b'/' in path:
819 if not ignoremissing and b'/' in path:
813 d, f = path.rsplit(b'/', 1)
820 d, f = path.rsplit(b'/', 1)
814 d = self._normalize(d, False, ignoremissing, None)
821 d = self._normalize(d, False, ignoremissing, None)
815 folded = d + b"/" + f
822 folded = d + b"/" + f
816 else:
823 else:
817 # No path components, preserve original case
824 # No path components, preserve original case
818 folded = path
825 folded = path
819 else:
826 else:
820 # recursively normalize leading directory components
827 # recursively normalize leading directory components
821 # against dirstate
828 # against dirstate
822 if b'/' in normed:
829 if b'/' in normed:
823 d, f = normed.rsplit(b'/', 1)
830 d, f = normed.rsplit(b'/', 1)
824 d = self._normalize(d, False, ignoremissing, True)
831 d = self._normalize(d, False, ignoremissing, True)
825 r = self._root + b"/" + d
832 r = self._root + b"/" + d
826 folded = d + b"/" + util.fspath(f, r)
833 folded = d + b"/" + util.fspath(f, r)
827 else:
834 else:
828 folded = util.fspath(normed, self._root)
835 folded = util.fspath(normed, self._root)
829 storemap[normed] = folded
836 storemap[normed] = folded
830
837
831 return folded
838 return folded
832
839
833 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
840 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
834 normed = util.normcase(path)
841 normed = util.normcase(path)
835 folded = self._map.filefoldmap.get(normed, None)
842 folded = self._map.filefoldmap.get(normed, None)
836 if folded is None:
843 if folded is None:
837 if isknown:
844 if isknown:
838 folded = path
845 folded = path
839 else:
846 else:
840 folded = self._discoverpath(
847 folded = self._discoverpath(
841 path, normed, ignoremissing, exists, self._map.filefoldmap
848 path, normed, ignoremissing, exists, self._map.filefoldmap
842 )
849 )
843 return folded
850 return folded
844
851
845 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
852 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
846 normed = util.normcase(path)
853 normed = util.normcase(path)
847 folded = self._map.filefoldmap.get(normed, None)
854 folded = self._map.filefoldmap.get(normed, None)
848 if folded is None:
855 if folded is None:
849 folded = self._map.dirfoldmap.get(normed, None)
856 folded = self._map.dirfoldmap.get(normed, None)
850 if folded is None:
857 if folded is None:
851 if isknown:
858 if isknown:
852 folded = path
859 folded = path
853 else:
860 else:
854 # store discovered result in dirfoldmap so that future
861 # store discovered result in dirfoldmap so that future
855 # normalizefile calls don't start matching directories
862 # normalizefile calls don't start matching directories
856 folded = self._discoverpath(
863 folded = self._discoverpath(
857 path, normed, ignoremissing, exists, self._map.dirfoldmap
864 path, normed, ignoremissing, exists, self._map.dirfoldmap
858 )
865 )
859 return folded
866 return folded
860
867
861 def normalize(self, path, isknown=False, ignoremissing=False):
868 def normalize(self, path, isknown=False, ignoremissing=False):
862 """
869 """
863 normalize the case of a pathname when on a casefolding filesystem
870 normalize the case of a pathname when on a casefolding filesystem
864
871
865 isknown specifies whether the filename came from walking the
872 isknown specifies whether the filename came from walking the
866 disk, to avoid extra filesystem access.
873 disk, to avoid extra filesystem access.
867
874
868 If ignoremissing is True, missing path are returned
875 If ignoremissing is True, missing path are returned
869 unchanged. Otherwise, we try harder to normalize possibly
876 unchanged. Otherwise, we try harder to normalize possibly
870 existing path components.
877 existing path components.
871
878
872 The normalized case is determined based on the following precedence:
879 The normalized case is determined based on the following precedence:
873
880
874 - version of name already stored in the dirstate
881 - version of name already stored in the dirstate
875 - version of name stored on disk
882 - version of name stored on disk
876 - version provided via command arguments
883 - version provided via command arguments
877 """
884 """
878
885
879 if self._checkcase:
886 if self._checkcase:
880 return self._normalize(path, isknown, ignoremissing)
887 return self._normalize(path, isknown, ignoremissing)
881 return path
888 return path
882
889
883 def clear(self):
890 def clear(self):
884 self._map.clear()
891 self._map.clear()
885 self._lastnormaltime = 0
892 self._lastnormaltime = 0
886 self._updatedfiles.clear()
893 self._updatedfiles.clear()
887 self._dirty = True
894 self._dirty = True
888
895
889 def rebuild(self, parent, allfiles, changedfiles=None):
896 def rebuild(self, parent, allfiles, changedfiles=None):
890 if changedfiles is None:
897 if changedfiles is None:
891 # Rebuild entire dirstate
898 # Rebuild entire dirstate
892 to_lookup = allfiles
899 to_lookup = allfiles
893 to_drop = []
900 to_drop = []
894 lastnormaltime = self._lastnormaltime
901 lastnormaltime = self._lastnormaltime
895 self.clear()
902 self.clear()
896 self._lastnormaltime = lastnormaltime
903 self._lastnormaltime = lastnormaltime
897 elif len(changedfiles) < 10:
904 elif len(changedfiles) < 10:
898 # Avoid turning allfiles into a set, which can be expensive if it's
905 # Avoid turning allfiles into a set, which can be expensive if it's
899 # large.
906 # large.
900 to_lookup = []
907 to_lookup = []
901 to_drop = []
908 to_drop = []
902 for f in changedfiles:
909 for f in changedfiles:
903 if f in allfiles:
910 if f in allfiles:
904 to_lookup.append(f)
911 to_lookup.append(f)
905 else:
912 else:
906 to_drop.append(f)
913 to_drop.append(f)
907 else:
914 else:
908 changedfilesset = set(changedfiles)
915 changedfilesset = set(changedfiles)
909 to_lookup = changedfilesset & set(allfiles)
916 to_lookup = changedfilesset & set(allfiles)
910 to_drop = changedfilesset - to_lookup
917 to_drop = changedfilesset - to_lookup
911
918
912 if self._origpl is None:
919 if self._origpl is None:
913 self._origpl = self._pl
920 self._origpl = self._pl
914 self._map.setparents(parent, self._nodeconstants.nullid)
921 self._map.setparents(parent, self._nodeconstants.nullid)
915
922
916 for f in to_lookup:
923 for f in to_lookup:
917 self.normallookup(f)
924 self.normallookup(f)
918 for f in to_drop:
925 for f in to_drop:
919 self._drop(f)
926 self._drop(f)
920
927
921 self._dirty = True
928 self._dirty = True
922
929
923 def identity(self):
930 def identity(self):
924 """Return identity of dirstate itself to detect changing in storage
931 """Return identity of dirstate itself to detect changing in storage
925
932
926 If identity of previous dirstate is equal to this, writing
933 If identity of previous dirstate is equal to this, writing
927 changes based on the former dirstate out can keep consistency.
934 changes based on the former dirstate out can keep consistency.
928 """
935 """
929 return self._map.identity
936 return self._map.identity
930
937
931 def write(self, tr):
938 def write(self, tr):
932 if not self._dirty:
939 if not self._dirty:
933 return
940 return
934
941
935 filename = self._filename
942 filename = self._filename
936 if tr:
943 if tr:
937 # 'dirstate.write()' is not only for writing in-memory
944 # 'dirstate.write()' is not only for writing in-memory
938 # changes out, but also for dropping ambiguous timestamp.
945 # changes out, but also for dropping ambiguous timestamp.
939 # delayed writing re-raise "ambiguous timestamp issue".
946 # delayed writing re-raise "ambiguous timestamp issue".
940 # See also the wiki page below for detail:
947 # See also the wiki page below for detail:
941 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
948 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
942
949
943 # emulate dropping timestamp in 'parsers.pack_dirstate'
950 # emulate dropping timestamp in 'parsers.pack_dirstate'
944 now = _getfsnow(self._opener)
951 now = _getfsnow(self._opener)
945 self._map.clearambiguoustimes(self._updatedfiles, now)
952 self._map.clearambiguoustimes(self._updatedfiles, now)
946
953
947 # emulate that all 'dirstate.normal' results are written out
954 # emulate that all 'dirstate.normal' results are written out
948 self._lastnormaltime = 0
955 self._lastnormaltime = 0
949 self._updatedfiles.clear()
956 self._updatedfiles.clear()
950
957
951 # delay writing in-memory changes out
958 # delay writing in-memory changes out
952 tr.addfilegenerator(
959 tr.addfilegenerator(
953 b'dirstate',
960 b'dirstate',
954 (self._filename,),
961 (self._filename,),
955 lambda f: self._writedirstate(tr, f),
962 lambda f: self._writedirstate(tr, f),
956 location=b'plain',
963 location=b'plain',
957 )
964 )
958 return
965 return
959
966
960 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
967 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
961 self._writedirstate(tr, st)
968 self._writedirstate(tr, st)
962
969
963 def addparentchangecallback(self, category, callback):
970 def addparentchangecallback(self, category, callback):
964 """add a callback to be called when the wd parents are changed
971 """add a callback to be called when the wd parents are changed
965
972
966 Callback will be called with the following arguments:
973 Callback will be called with the following arguments:
967 dirstate, (oldp1, oldp2), (newp1, newp2)
974 dirstate, (oldp1, oldp2), (newp1, newp2)
968
975
969 Category is a unique identifier to allow overwriting an old callback
976 Category is a unique identifier to allow overwriting an old callback
970 with a newer callback.
977 with a newer callback.
971 """
978 """
972 self._plchangecallbacks[category] = callback
979 self._plchangecallbacks[category] = callback
973
980
974 def _writedirstate(self, tr, st):
981 def _writedirstate(self, tr, st):
975 # notify callbacks about parents change
982 # notify callbacks about parents change
976 if self._origpl is not None and self._origpl != self._pl:
983 if self._origpl is not None and self._origpl != self._pl:
977 for c, callback in sorted(
984 for c, callback in sorted(
978 pycompat.iteritems(self._plchangecallbacks)
985 pycompat.iteritems(self._plchangecallbacks)
979 ):
986 ):
980 callback(self, self._origpl, self._pl)
987 callback(self, self._origpl, self._pl)
981 self._origpl = None
988 self._origpl = None
982 # use the modification time of the newly created temporary file as the
989 # use the modification time of the newly created temporary file as the
983 # filesystem's notion of 'now'
990 # filesystem's notion of 'now'
984 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
991 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
985
992
986 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
993 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
987 # timestamp of each entries in dirstate, because of 'now > mtime'
994 # timestamp of each entries in dirstate, because of 'now > mtime'
988 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
995 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
989 if delaywrite > 0:
996 if delaywrite > 0:
990 # do we have any files to delay for?
997 # do we have any files to delay for?
991 for f, e in pycompat.iteritems(self._map):
998 for f, e in pycompat.iteritems(self._map):
992 if e.need_delay(now):
999 if e.need_delay(now):
993 import time # to avoid useless import
1000 import time # to avoid useless import
994
1001
995 # rather than sleep n seconds, sleep until the next
1002 # rather than sleep n seconds, sleep until the next
996 # multiple of n seconds
1003 # multiple of n seconds
997 clock = time.time()
1004 clock = time.time()
998 start = int(clock) - (int(clock) % delaywrite)
1005 start = int(clock) - (int(clock) % delaywrite)
999 end = start + delaywrite
1006 end = start + delaywrite
1000 time.sleep(end - clock)
1007 time.sleep(end - clock)
1001 now = end # trust our estimate that the end is near now
1008 now = end # trust our estimate that the end is near now
1002 break
1009 break
1003
1010
1004 self._map.write(tr, st, now)
1011 self._map.write(tr, st, now)
1005 self._lastnormaltime = 0
1012 self._lastnormaltime = 0
1006 self._dirty = False
1013 self._dirty = False
1007
1014
1008 def _dirignore(self, f):
1015 def _dirignore(self, f):
1009 if self._ignore(f):
1016 if self._ignore(f):
1010 return True
1017 return True
1011 for p in pathutil.finddirs(f):
1018 for p in pathutil.finddirs(f):
1012 if self._ignore(p):
1019 if self._ignore(p):
1013 return True
1020 return True
1014 return False
1021 return False
1015
1022
1016 def _ignorefiles(self):
1023 def _ignorefiles(self):
1017 files = []
1024 files = []
1018 if os.path.exists(self._join(b'.hgignore')):
1025 if os.path.exists(self._join(b'.hgignore')):
1019 files.append(self._join(b'.hgignore'))
1026 files.append(self._join(b'.hgignore'))
1020 for name, path in self._ui.configitems(b"ui"):
1027 for name, path in self._ui.configitems(b"ui"):
1021 if name == b'ignore' or name.startswith(b'ignore.'):
1028 if name == b'ignore' or name.startswith(b'ignore.'):
1022 # we need to use os.path.join here rather than self._join
1029 # we need to use os.path.join here rather than self._join
1023 # because path is arbitrary and user-specified
1030 # because path is arbitrary and user-specified
1024 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1031 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1025 return files
1032 return files
1026
1033
1027 def _ignorefileandline(self, f):
1034 def _ignorefileandline(self, f):
1028 files = collections.deque(self._ignorefiles())
1035 files = collections.deque(self._ignorefiles())
1029 visited = set()
1036 visited = set()
1030 while files:
1037 while files:
1031 i = files.popleft()
1038 i = files.popleft()
1032 patterns = matchmod.readpatternfile(
1039 patterns = matchmod.readpatternfile(
1033 i, self._ui.warn, sourceinfo=True
1040 i, self._ui.warn, sourceinfo=True
1034 )
1041 )
1035 for pattern, lineno, line in patterns:
1042 for pattern, lineno, line in patterns:
1036 kind, p = matchmod._patsplit(pattern, b'glob')
1043 kind, p = matchmod._patsplit(pattern, b'glob')
1037 if kind == b"subinclude":
1044 if kind == b"subinclude":
1038 if p not in visited:
1045 if p not in visited:
1039 files.append(p)
1046 files.append(p)
1040 continue
1047 continue
1041 m = matchmod.match(
1048 m = matchmod.match(
1042 self._root, b'', [], [pattern], warn=self._ui.warn
1049 self._root, b'', [], [pattern], warn=self._ui.warn
1043 )
1050 )
1044 if m(f):
1051 if m(f):
1045 return (i, lineno, line)
1052 return (i, lineno, line)
1046 visited.add(i)
1053 visited.add(i)
1047 return (None, -1, b"")
1054 return (None, -1, b"")
1048
1055
1049 def _walkexplicit(self, match, subrepos):
1056 def _walkexplicit(self, match, subrepos):
1050 """Get stat data about the files explicitly specified by match.
1057 """Get stat data about the files explicitly specified by match.
1051
1058
1052 Return a triple (results, dirsfound, dirsnotfound).
1059 Return a triple (results, dirsfound, dirsnotfound).
1053 - results is a mapping from filename to stat result. It also contains
1060 - results is a mapping from filename to stat result. It also contains
1054 listings mapping subrepos and .hg to None.
1061 listings mapping subrepos and .hg to None.
1055 - dirsfound is a list of files found to be directories.
1062 - dirsfound is a list of files found to be directories.
1056 - dirsnotfound is a list of files that the dirstate thinks are
1063 - dirsnotfound is a list of files that the dirstate thinks are
1057 directories and that were not found."""
1064 directories and that were not found."""
1058
1065
1059 def badtype(mode):
1066 def badtype(mode):
1060 kind = _(b'unknown')
1067 kind = _(b'unknown')
1061 if stat.S_ISCHR(mode):
1068 if stat.S_ISCHR(mode):
1062 kind = _(b'character device')
1069 kind = _(b'character device')
1063 elif stat.S_ISBLK(mode):
1070 elif stat.S_ISBLK(mode):
1064 kind = _(b'block device')
1071 kind = _(b'block device')
1065 elif stat.S_ISFIFO(mode):
1072 elif stat.S_ISFIFO(mode):
1066 kind = _(b'fifo')
1073 kind = _(b'fifo')
1067 elif stat.S_ISSOCK(mode):
1074 elif stat.S_ISSOCK(mode):
1068 kind = _(b'socket')
1075 kind = _(b'socket')
1069 elif stat.S_ISDIR(mode):
1076 elif stat.S_ISDIR(mode):
1070 kind = _(b'directory')
1077 kind = _(b'directory')
1071 return _(b'unsupported file type (type is %s)') % kind
1078 return _(b'unsupported file type (type is %s)') % kind
1072
1079
1073 badfn = match.bad
1080 badfn = match.bad
1074 dmap = self._map
1081 dmap = self._map
1075 lstat = os.lstat
1082 lstat = os.lstat
1076 getkind = stat.S_IFMT
1083 getkind = stat.S_IFMT
1077 dirkind = stat.S_IFDIR
1084 dirkind = stat.S_IFDIR
1078 regkind = stat.S_IFREG
1085 regkind = stat.S_IFREG
1079 lnkkind = stat.S_IFLNK
1086 lnkkind = stat.S_IFLNK
1080 join = self._join
1087 join = self._join
1081 dirsfound = []
1088 dirsfound = []
1082 foundadd = dirsfound.append
1089 foundadd = dirsfound.append
1083 dirsnotfound = []
1090 dirsnotfound = []
1084 notfoundadd = dirsnotfound.append
1091 notfoundadd = dirsnotfound.append
1085
1092
1086 if not match.isexact() and self._checkcase:
1093 if not match.isexact() and self._checkcase:
1087 normalize = self._normalize
1094 normalize = self._normalize
1088 else:
1095 else:
1089 normalize = None
1096 normalize = None
1090
1097
1091 files = sorted(match.files())
1098 files = sorted(match.files())
1092 subrepos.sort()
1099 subrepos.sort()
1093 i, j = 0, 0
1100 i, j = 0, 0
1094 while i < len(files) and j < len(subrepos):
1101 while i < len(files) and j < len(subrepos):
1095 subpath = subrepos[j] + b"/"
1102 subpath = subrepos[j] + b"/"
1096 if files[i] < subpath:
1103 if files[i] < subpath:
1097 i += 1
1104 i += 1
1098 continue
1105 continue
1099 while i < len(files) and files[i].startswith(subpath):
1106 while i < len(files) and files[i].startswith(subpath):
1100 del files[i]
1107 del files[i]
1101 j += 1
1108 j += 1
1102
1109
1103 if not files or b'' in files:
1110 if not files or b'' in files:
1104 files = [b'']
1111 files = [b'']
1105 # constructing the foldmap is expensive, so don't do it for the
1112 # constructing the foldmap is expensive, so don't do it for the
1106 # common case where files is ['']
1113 # common case where files is ['']
1107 normalize = None
1114 normalize = None
1108 results = dict.fromkeys(subrepos)
1115 results = dict.fromkeys(subrepos)
1109 results[b'.hg'] = None
1116 results[b'.hg'] = None
1110
1117
1111 for ff in files:
1118 for ff in files:
1112 if normalize:
1119 if normalize:
1113 nf = normalize(ff, False, True)
1120 nf = normalize(ff, False, True)
1114 else:
1121 else:
1115 nf = ff
1122 nf = ff
1116 if nf in results:
1123 if nf in results:
1117 continue
1124 continue
1118
1125
1119 try:
1126 try:
1120 st = lstat(join(nf))
1127 st = lstat(join(nf))
1121 kind = getkind(st.st_mode)
1128 kind = getkind(st.st_mode)
1122 if kind == dirkind:
1129 if kind == dirkind:
1123 if nf in dmap:
1130 if nf in dmap:
1124 # file replaced by dir on disk but still in dirstate
1131 # file replaced by dir on disk but still in dirstate
1125 results[nf] = None
1132 results[nf] = None
1126 foundadd((nf, ff))
1133 foundadd((nf, ff))
1127 elif kind == regkind or kind == lnkkind:
1134 elif kind == regkind or kind == lnkkind:
1128 results[nf] = st
1135 results[nf] = st
1129 else:
1136 else:
1130 badfn(ff, badtype(kind))
1137 badfn(ff, badtype(kind))
1131 if nf in dmap:
1138 if nf in dmap:
1132 results[nf] = None
1139 results[nf] = None
1133 except OSError as inst: # nf not found on disk - it is dirstate only
1140 except OSError as inst: # nf not found on disk - it is dirstate only
1134 if nf in dmap: # does it exactly match a missing file?
1141 if nf in dmap: # does it exactly match a missing file?
1135 results[nf] = None
1142 results[nf] = None
1136 else: # does it match a missing directory?
1143 else: # does it match a missing directory?
1137 if self._map.hasdir(nf):
1144 if self._map.hasdir(nf):
1138 notfoundadd(nf)
1145 notfoundadd(nf)
1139 else:
1146 else:
1140 badfn(ff, encoding.strtolocal(inst.strerror))
1147 badfn(ff, encoding.strtolocal(inst.strerror))
1141
1148
1142 # match.files() may contain explicitly-specified paths that shouldn't
1149 # match.files() may contain explicitly-specified paths that shouldn't
1143 # be taken; drop them from the list of files found. dirsfound/notfound
1150 # be taken; drop them from the list of files found. dirsfound/notfound
1144 # aren't filtered here because they will be tested later.
1151 # aren't filtered here because they will be tested later.
1145 if match.anypats():
1152 if match.anypats():
1146 for f in list(results):
1153 for f in list(results):
1147 if f == b'.hg' or f in subrepos:
1154 if f == b'.hg' or f in subrepos:
1148 # keep sentinel to disable further out-of-repo walks
1155 # keep sentinel to disable further out-of-repo walks
1149 continue
1156 continue
1150 if not match(f):
1157 if not match(f):
1151 del results[f]
1158 del results[f]
1152
1159
1153 # Case insensitive filesystems cannot rely on lstat() failing to detect
1160 # Case insensitive filesystems cannot rely on lstat() failing to detect
1154 # a case-only rename. Prune the stat object for any file that does not
1161 # a case-only rename. Prune the stat object for any file that does not
1155 # match the case in the filesystem, if there are multiple files that
1162 # match the case in the filesystem, if there are multiple files that
1156 # normalize to the same path.
1163 # normalize to the same path.
1157 if match.isexact() and self._checkcase:
1164 if match.isexact() and self._checkcase:
1158 normed = {}
1165 normed = {}
1159
1166
1160 for f, st in pycompat.iteritems(results):
1167 for f, st in pycompat.iteritems(results):
1161 if st is None:
1168 if st is None:
1162 continue
1169 continue
1163
1170
1164 nc = util.normcase(f)
1171 nc = util.normcase(f)
1165 paths = normed.get(nc)
1172 paths = normed.get(nc)
1166
1173
1167 if paths is None:
1174 if paths is None:
1168 paths = set()
1175 paths = set()
1169 normed[nc] = paths
1176 normed[nc] = paths
1170
1177
1171 paths.add(f)
1178 paths.add(f)
1172
1179
1173 for norm, paths in pycompat.iteritems(normed):
1180 for norm, paths in pycompat.iteritems(normed):
1174 if len(paths) > 1:
1181 if len(paths) > 1:
1175 for path in paths:
1182 for path in paths:
1176 folded = self._discoverpath(
1183 folded = self._discoverpath(
1177 path, norm, True, None, self._map.dirfoldmap
1184 path, norm, True, None, self._map.dirfoldmap
1178 )
1185 )
1179 if path != folded:
1186 if path != folded:
1180 results[path] = None
1187 results[path] = None
1181
1188
1182 return results, dirsfound, dirsnotfound
1189 return results, dirsfound, dirsnotfound
1183
1190
1184 def walk(self, match, subrepos, unknown, ignored, full=True):
1191 def walk(self, match, subrepos, unknown, ignored, full=True):
1185 """
1192 """
1186 Walk recursively through the directory tree, finding all files
1193 Walk recursively through the directory tree, finding all files
1187 matched by match.
1194 matched by match.
1188
1195
1189 If full is False, maybe skip some known-clean files.
1196 If full is False, maybe skip some known-clean files.
1190
1197
1191 Return a dict mapping filename to stat-like object (either
1198 Return a dict mapping filename to stat-like object (either
1192 mercurial.osutil.stat instance or return value of os.stat()).
1199 mercurial.osutil.stat instance or return value of os.stat()).
1193
1200
1194 """
1201 """
1195 # full is a flag that extensions that hook into walk can use -- this
1202 # full is a flag that extensions that hook into walk can use -- this
1196 # implementation doesn't use it at all. This satisfies the contract
1203 # implementation doesn't use it at all. This satisfies the contract
1197 # because we only guarantee a "maybe".
1204 # because we only guarantee a "maybe".
1198
1205
1199 if ignored:
1206 if ignored:
1200 ignore = util.never
1207 ignore = util.never
1201 dirignore = util.never
1208 dirignore = util.never
1202 elif unknown:
1209 elif unknown:
1203 ignore = self._ignore
1210 ignore = self._ignore
1204 dirignore = self._dirignore
1211 dirignore = self._dirignore
1205 else:
1212 else:
1206 # if not unknown and not ignored, drop dir recursion and step 2
1213 # if not unknown and not ignored, drop dir recursion and step 2
1207 ignore = util.always
1214 ignore = util.always
1208 dirignore = util.always
1215 dirignore = util.always
1209
1216
1210 matchfn = match.matchfn
1217 matchfn = match.matchfn
1211 matchalways = match.always()
1218 matchalways = match.always()
1212 matchtdir = match.traversedir
1219 matchtdir = match.traversedir
1213 dmap = self._map
1220 dmap = self._map
1214 listdir = util.listdir
1221 listdir = util.listdir
1215 lstat = os.lstat
1222 lstat = os.lstat
1216 dirkind = stat.S_IFDIR
1223 dirkind = stat.S_IFDIR
1217 regkind = stat.S_IFREG
1224 regkind = stat.S_IFREG
1218 lnkkind = stat.S_IFLNK
1225 lnkkind = stat.S_IFLNK
1219 join = self._join
1226 join = self._join
1220
1227
1221 exact = skipstep3 = False
1228 exact = skipstep3 = False
1222 if match.isexact(): # match.exact
1229 if match.isexact(): # match.exact
1223 exact = True
1230 exact = True
1224 dirignore = util.always # skip step 2
1231 dirignore = util.always # skip step 2
1225 elif match.prefix(): # match.match, no patterns
1232 elif match.prefix(): # match.match, no patterns
1226 skipstep3 = True
1233 skipstep3 = True
1227
1234
1228 if not exact and self._checkcase:
1235 if not exact and self._checkcase:
1229 normalize = self._normalize
1236 normalize = self._normalize
1230 normalizefile = self._normalizefile
1237 normalizefile = self._normalizefile
1231 skipstep3 = False
1238 skipstep3 = False
1232 else:
1239 else:
1233 normalize = self._normalize
1240 normalize = self._normalize
1234 normalizefile = None
1241 normalizefile = None
1235
1242
1236 # step 1: find all explicit files
1243 # step 1: find all explicit files
1237 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1244 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1238 if matchtdir:
1245 if matchtdir:
1239 for d in work:
1246 for d in work:
1240 matchtdir(d[0])
1247 matchtdir(d[0])
1241 for d in dirsnotfound:
1248 for d in dirsnotfound:
1242 matchtdir(d)
1249 matchtdir(d)
1243
1250
1244 skipstep3 = skipstep3 and not (work or dirsnotfound)
1251 skipstep3 = skipstep3 and not (work or dirsnotfound)
1245 work = [d for d in work if not dirignore(d[0])]
1252 work = [d for d in work if not dirignore(d[0])]
1246
1253
1247 # step 2: visit subdirectories
1254 # step 2: visit subdirectories
1248 def traverse(work, alreadynormed):
1255 def traverse(work, alreadynormed):
1249 wadd = work.append
1256 wadd = work.append
1250 while work:
1257 while work:
1251 tracing.counter('dirstate.walk work', len(work))
1258 tracing.counter('dirstate.walk work', len(work))
1252 nd = work.pop()
1259 nd = work.pop()
1253 visitentries = match.visitchildrenset(nd)
1260 visitentries = match.visitchildrenset(nd)
1254 if not visitentries:
1261 if not visitentries:
1255 continue
1262 continue
1256 if visitentries == b'this' or visitentries == b'all':
1263 if visitentries == b'this' or visitentries == b'all':
1257 visitentries = None
1264 visitentries = None
1258 skip = None
1265 skip = None
1259 if nd != b'':
1266 if nd != b'':
1260 skip = b'.hg'
1267 skip = b'.hg'
1261 try:
1268 try:
1262 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1269 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1263 entries = listdir(join(nd), stat=True, skip=skip)
1270 entries = listdir(join(nd), stat=True, skip=skip)
1264 except OSError as inst:
1271 except OSError as inst:
1265 if inst.errno in (errno.EACCES, errno.ENOENT):
1272 if inst.errno in (errno.EACCES, errno.ENOENT):
1266 match.bad(
1273 match.bad(
1267 self.pathto(nd), encoding.strtolocal(inst.strerror)
1274 self.pathto(nd), encoding.strtolocal(inst.strerror)
1268 )
1275 )
1269 continue
1276 continue
1270 raise
1277 raise
1271 for f, kind, st in entries:
1278 for f, kind, st in entries:
1272 # Some matchers may return files in the visitentries set,
1279 # Some matchers may return files in the visitentries set,
1273 # instead of 'this', if the matcher explicitly mentions them
1280 # instead of 'this', if the matcher explicitly mentions them
1274 # and is not an exactmatcher. This is acceptable; we do not
1281 # and is not an exactmatcher. This is acceptable; we do not
1275 # make any hard assumptions about file-or-directory below
1282 # make any hard assumptions about file-or-directory below
1276 # based on the presence of `f` in visitentries. If
1283 # based on the presence of `f` in visitentries. If
1277 # visitchildrenset returned a set, we can always skip the
1284 # visitchildrenset returned a set, we can always skip the
1278 # entries *not* in the set it provided regardless of whether
1285 # entries *not* in the set it provided regardless of whether
1279 # they're actually a file or a directory.
1286 # they're actually a file or a directory.
1280 if visitentries and f not in visitentries:
1287 if visitentries and f not in visitentries:
1281 continue
1288 continue
1282 if normalizefile:
1289 if normalizefile:
1283 # even though f might be a directory, we're only
1290 # even though f might be a directory, we're only
1284 # interested in comparing it to files currently in the
1291 # interested in comparing it to files currently in the
1285 # dmap -- therefore normalizefile is enough
1292 # dmap -- therefore normalizefile is enough
1286 nf = normalizefile(
1293 nf = normalizefile(
1287 nd and (nd + b"/" + f) or f, True, True
1294 nd and (nd + b"/" + f) or f, True, True
1288 )
1295 )
1289 else:
1296 else:
1290 nf = nd and (nd + b"/" + f) or f
1297 nf = nd and (nd + b"/" + f) or f
1291 if nf not in results:
1298 if nf not in results:
1292 if kind == dirkind:
1299 if kind == dirkind:
1293 if not ignore(nf):
1300 if not ignore(nf):
1294 if matchtdir:
1301 if matchtdir:
1295 matchtdir(nf)
1302 matchtdir(nf)
1296 wadd(nf)
1303 wadd(nf)
1297 if nf in dmap and (matchalways or matchfn(nf)):
1304 if nf in dmap and (matchalways or matchfn(nf)):
1298 results[nf] = None
1305 results[nf] = None
1299 elif kind == regkind or kind == lnkkind:
1306 elif kind == regkind or kind == lnkkind:
1300 if nf in dmap:
1307 if nf in dmap:
1301 if matchalways or matchfn(nf):
1308 if matchalways or matchfn(nf):
1302 results[nf] = st
1309 results[nf] = st
1303 elif (matchalways or matchfn(nf)) and not ignore(
1310 elif (matchalways or matchfn(nf)) and not ignore(
1304 nf
1311 nf
1305 ):
1312 ):
1306 # unknown file -- normalize if necessary
1313 # unknown file -- normalize if necessary
1307 if not alreadynormed:
1314 if not alreadynormed:
1308 nf = normalize(nf, False, True)
1315 nf = normalize(nf, False, True)
1309 results[nf] = st
1316 results[nf] = st
1310 elif nf in dmap and (matchalways or matchfn(nf)):
1317 elif nf in dmap and (matchalways or matchfn(nf)):
1311 results[nf] = None
1318 results[nf] = None
1312
1319
1313 for nd, d in work:
1320 for nd, d in work:
1314 # alreadynormed means that processwork doesn't have to do any
1321 # alreadynormed means that processwork doesn't have to do any
1315 # expensive directory normalization
1322 # expensive directory normalization
1316 alreadynormed = not normalize or nd == d
1323 alreadynormed = not normalize or nd == d
1317 traverse([d], alreadynormed)
1324 traverse([d], alreadynormed)
1318
1325
1319 for s in subrepos:
1326 for s in subrepos:
1320 del results[s]
1327 del results[s]
1321 del results[b'.hg']
1328 del results[b'.hg']
1322
1329
1323 # step 3: visit remaining files from dmap
1330 # step 3: visit remaining files from dmap
1324 if not skipstep3 and not exact:
1331 if not skipstep3 and not exact:
1325 # If a dmap file is not in results yet, it was either
1332 # If a dmap file is not in results yet, it was either
1326 # a) not matching matchfn b) ignored, c) missing, or d) under a
1333 # a) not matching matchfn b) ignored, c) missing, or d) under a
1327 # symlink directory.
1334 # symlink directory.
1328 if not results and matchalways:
1335 if not results and matchalways:
1329 visit = [f for f in dmap]
1336 visit = [f for f in dmap]
1330 else:
1337 else:
1331 visit = [f for f in dmap if f not in results and matchfn(f)]
1338 visit = [f for f in dmap if f not in results and matchfn(f)]
1332 visit.sort()
1339 visit.sort()
1333
1340
1334 if unknown:
1341 if unknown:
1335 # unknown == True means we walked all dirs under the roots
1342 # unknown == True means we walked all dirs under the roots
1336 # that wasn't ignored, and everything that matched was stat'ed
1343 # that wasn't ignored, and everything that matched was stat'ed
1337 # and is already in results.
1344 # and is already in results.
1338 # The rest must thus be ignored or under a symlink.
1345 # The rest must thus be ignored or under a symlink.
1339 audit_path = pathutil.pathauditor(self._root, cached=True)
1346 audit_path = pathutil.pathauditor(self._root, cached=True)
1340
1347
1341 for nf in iter(visit):
1348 for nf in iter(visit):
1342 # If a stat for the same file was already added with a
1349 # If a stat for the same file was already added with a
1343 # different case, don't add one for this, since that would
1350 # different case, don't add one for this, since that would
1344 # make it appear as if the file exists under both names
1351 # make it appear as if the file exists under both names
1345 # on disk.
1352 # on disk.
1346 if (
1353 if (
1347 normalizefile
1354 normalizefile
1348 and normalizefile(nf, True, True) in results
1355 and normalizefile(nf, True, True) in results
1349 ):
1356 ):
1350 results[nf] = None
1357 results[nf] = None
1351 # Report ignored items in the dmap as long as they are not
1358 # Report ignored items in the dmap as long as they are not
1352 # under a symlink directory.
1359 # under a symlink directory.
1353 elif audit_path.check(nf):
1360 elif audit_path.check(nf):
1354 try:
1361 try:
1355 results[nf] = lstat(join(nf))
1362 results[nf] = lstat(join(nf))
1356 # file was just ignored, no links, and exists
1363 # file was just ignored, no links, and exists
1357 except OSError:
1364 except OSError:
1358 # file doesn't exist
1365 # file doesn't exist
1359 results[nf] = None
1366 results[nf] = None
1360 else:
1367 else:
1361 # It's either missing or under a symlink directory
1368 # It's either missing or under a symlink directory
1362 # which we in this case report as missing
1369 # which we in this case report as missing
1363 results[nf] = None
1370 results[nf] = None
1364 else:
1371 else:
1365 # We may not have walked the full directory tree above,
1372 # We may not have walked the full directory tree above,
1366 # so stat and check everything we missed.
1373 # so stat and check everything we missed.
1367 iv = iter(visit)
1374 iv = iter(visit)
1368 for st in util.statfiles([join(i) for i in visit]):
1375 for st in util.statfiles([join(i) for i in visit]):
1369 results[next(iv)] = st
1376 results[next(iv)] = st
1370 return results
1377 return results
1371
1378
1372 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1379 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1373 # Force Rayon (Rust parallelism library) to respect the number of
1380 # Force Rayon (Rust parallelism library) to respect the number of
1374 # workers. This is a temporary workaround until Rust code knows
1381 # workers. This is a temporary workaround until Rust code knows
1375 # how to read the config file.
1382 # how to read the config file.
1376 numcpus = self._ui.configint(b"worker", b"numcpus")
1383 numcpus = self._ui.configint(b"worker", b"numcpus")
1377 if numcpus is not None:
1384 if numcpus is not None:
1378 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1385 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1379
1386
1380 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1387 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1381 if not workers_enabled:
1388 if not workers_enabled:
1382 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1389 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1383
1390
1384 (
1391 (
1385 lookup,
1392 lookup,
1386 modified,
1393 modified,
1387 added,
1394 added,
1388 removed,
1395 removed,
1389 deleted,
1396 deleted,
1390 clean,
1397 clean,
1391 ignored,
1398 ignored,
1392 unknown,
1399 unknown,
1393 warnings,
1400 warnings,
1394 bad,
1401 bad,
1395 traversed,
1402 traversed,
1396 dirty,
1403 dirty,
1397 ) = rustmod.status(
1404 ) = rustmod.status(
1398 self._map._rustmap,
1405 self._map._rustmap,
1399 matcher,
1406 matcher,
1400 self._rootdir,
1407 self._rootdir,
1401 self._ignorefiles(),
1408 self._ignorefiles(),
1402 self._checkexec,
1409 self._checkexec,
1403 self._lastnormaltime,
1410 self._lastnormaltime,
1404 bool(list_clean),
1411 bool(list_clean),
1405 bool(list_ignored),
1412 bool(list_ignored),
1406 bool(list_unknown),
1413 bool(list_unknown),
1407 bool(matcher.traversedir),
1414 bool(matcher.traversedir),
1408 )
1415 )
1409
1416
1410 self._dirty |= dirty
1417 self._dirty |= dirty
1411
1418
1412 if matcher.traversedir:
1419 if matcher.traversedir:
1413 for dir in traversed:
1420 for dir in traversed:
1414 matcher.traversedir(dir)
1421 matcher.traversedir(dir)
1415
1422
1416 if self._ui.warn:
1423 if self._ui.warn:
1417 for item in warnings:
1424 for item in warnings:
1418 if isinstance(item, tuple):
1425 if isinstance(item, tuple):
1419 file_path, syntax = item
1426 file_path, syntax = item
1420 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1427 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1421 file_path,
1428 file_path,
1422 syntax,
1429 syntax,
1423 )
1430 )
1424 self._ui.warn(msg)
1431 self._ui.warn(msg)
1425 else:
1432 else:
1426 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1433 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1427 self._ui.warn(
1434 self._ui.warn(
1428 msg
1435 msg
1429 % (
1436 % (
1430 pathutil.canonpath(
1437 pathutil.canonpath(
1431 self._rootdir, self._rootdir, item
1438 self._rootdir, self._rootdir, item
1432 ),
1439 ),
1433 b"No such file or directory",
1440 b"No such file or directory",
1434 )
1441 )
1435 )
1442 )
1436
1443
1437 for (fn, message) in bad:
1444 for (fn, message) in bad:
1438 matcher.bad(fn, encoding.strtolocal(message))
1445 matcher.bad(fn, encoding.strtolocal(message))
1439
1446
1440 status = scmutil.status(
1447 status = scmutil.status(
1441 modified=modified,
1448 modified=modified,
1442 added=added,
1449 added=added,
1443 removed=removed,
1450 removed=removed,
1444 deleted=deleted,
1451 deleted=deleted,
1445 unknown=unknown,
1452 unknown=unknown,
1446 ignored=ignored,
1453 ignored=ignored,
1447 clean=clean,
1454 clean=clean,
1448 )
1455 )
1449 return (lookup, status)
1456 return (lookup, status)
1450
1457
1451 def status(self, match, subrepos, ignored, clean, unknown):
1458 def status(self, match, subrepos, ignored, clean, unknown):
1452 """Determine the status of the working copy relative to the
1459 """Determine the status of the working copy relative to the
1453 dirstate and return a pair of (unsure, status), where status is of type
1460 dirstate and return a pair of (unsure, status), where status is of type
1454 scmutil.status and:
1461 scmutil.status and:
1455
1462
1456 unsure:
1463 unsure:
1457 files that might have been modified since the dirstate was
1464 files that might have been modified since the dirstate was
1458 written, but need to be read to be sure (size is the same
1465 written, but need to be read to be sure (size is the same
1459 but mtime differs)
1466 but mtime differs)
1460 status.modified:
1467 status.modified:
1461 files that have definitely been modified since the dirstate
1468 files that have definitely been modified since the dirstate
1462 was written (different size or mode)
1469 was written (different size or mode)
1463 status.clean:
1470 status.clean:
1464 files that have definitely not been modified since the
1471 files that have definitely not been modified since the
1465 dirstate was written
1472 dirstate was written
1466 """
1473 """
1467 listignored, listclean, listunknown = ignored, clean, unknown
1474 listignored, listclean, listunknown = ignored, clean, unknown
1468 lookup, modified, added, unknown, ignored = [], [], [], [], []
1475 lookup, modified, added, unknown, ignored = [], [], [], [], []
1469 removed, deleted, clean = [], [], []
1476 removed, deleted, clean = [], [], []
1470
1477
1471 dmap = self._map
1478 dmap = self._map
1472 dmap.preload()
1479 dmap.preload()
1473
1480
1474 use_rust = True
1481 use_rust = True
1475
1482
1476 allowed_matchers = (
1483 allowed_matchers = (
1477 matchmod.alwaysmatcher,
1484 matchmod.alwaysmatcher,
1478 matchmod.exactmatcher,
1485 matchmod.exactmatcher,
1479 matchmod.includematcher,
1486 matchmod.includematcher,
1480 )
1487 )
1481
1488
1482 if rustmod is None:
1489 if rustmod is None:
1483 use_rust = False
1490 use_rust = False
1484 elif self._checkcase:
1491 elif self._checkcase:
1485 # Case-insensitive filesystems are not handled yet
1492 # Case-insensitive filesystems are not handled yet
1486 use_rust = False
1493 use_rust = False
1487 elif subrepos:
1494 elif subrepos:
1488 use_rust = False
1495 use_rust = False
1489 elif sparse.enabled:
1496 elif sparse.enabled:
1490 use_rust = False
1497 use_rust = False
1491 elif not isinstance(match, allowed_matchers):
1498 elif not isinstance(match, allowed_matchers):
1492 # Some matchers have yet to be implemented
1499 # Some matchers have yet to be implemented
1493 use_rust = False
1500 use_rust = False
1494
1501
1495 if use_rust:
1502 if use_rust:
1496 try:
1503 try:
1497 return self._rust_status(
1504 return self._rust_status(
1498 match, listclean, listignored, listunknown
1505 match, listclean, listignored, listunknown
1499 )
1506 )
1500 except rustmod.FallbackError:
1507 except rustmod.FallbackError:
1501 pass
1508 pass
1502
1509
1503 def noop(f):
1510 def noop(f):
1504 pass
1511 pass
1505
1512
1506 dcontains = dmap.__contains__
1513 dcontains = dmap.__contains__
1507 dget = dmap.__getitem__
1514 dget = dmap.__getitem__
1508 ladd = lookup.append # aka "unsure"
1515 ladd = lookup.append # aka "unsure"
1509 madd = modified.append
1516 madd = modified.append
1510 aadd = added.append
1517 aadd = added.append
1511 uadd = unknown.append if listunknown else noop
1518 uadd = unknown.append if listunknown else noop
1512 iadd = ignored.append if listignored else noop
1519 iadd = ignored.append if listignored else noop
1513 radd = removed.append
1520 radd = removed.append
1514 dadd = deleted.append
1521 dadd = deleted.append
1515 cadd = clean.append if listclean else noop
1522 cadd = clean.append if listclean else noop
1516 mexact = match.exact
1523 mexact = match.exact
1517 dirignore = self._dirignore
1524 dirignore = self._dirignore
1518 checkexec = self._checkexec
1525 checkexec = self._checkexec
1519 copymap = self._map.copymap
1526 copymap = self._map.copymap
1520 lastnormaltime = self._lastnormaltime
1527 lastnormaltime = self._lastnormaltime
1521
1528
1522 # We need to do full walks when either
1529 # We need to do full walks when either
1523 # - we're listing all clean files, or
1530 # - we're listing all clean files, or
1524 # - match.traversedir does something, because match.traversedir should
1531 # - match.traversedir does something, because match.traversedir should
1525 # be called for every dir in the working dir
1532 # be called for every dir in the working dir
1526 full = listclean or match.traversedir is not None
1533 full = listclean or match.traversedir is not None
1527 for fn, st in pycompat.iteritems(
1534 for fn, st in pycompat.iteritems(
1528 self.walk(match, subrepos, listunknown, listignored, full=full)
1535 self.walk(match, subrepos, listunknown, listignored, full=full)
1529 ):
1536 ):
1530 if not dcontains(fn):
1537 if not dcontains(fn):
1531 if (listignored or mexact(fn)) and dirignore(fn):
1538 if (listignored or mexact(fn)) and dirignore(fn):
1532 if listignored:
1539 if listignored:
1533 iadd(fn)
1540 iadd(fn)
1534 else:
1541 else:
1535 uadd(fn)
1542 uadd(fn)
1536 continue
1543 continue
1537
1544
1538 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1545 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1539 # written like that for performance reasons. dmap[fn] is not a
1546 # written like that for performance reasons. dmap[fn] is not a
1540 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1547 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1541 # opcode has fast paths when the value to be unpacked is a tuple or
1548 # opcode has fast paths when the value to be unpacked is a tuple or
1542 # a list, but falls back to creating a full-fledged iterator in
1549 # a list, but falls back to creating a full-fledged iterator in
1543 # general. That is much slower than simply accessing and storing the
1550 # general. That is much slower than simply accessing and storing the
1544 # tuple members one by one.
1551 # tuple members one by one.
1545 t = dget(fn)
1552 t = dget(fn)
1546 mode = t.mode
1553 mode = t.mode
1547 size = t.size
1554 size = t.size
1548 time = t.mtime
1555 time = t.mtime
1549
1556
1550 if not st and t.tracked:
1557 if not st and t.tracked:
1551 dadd(fn)
1558 dadd(fn)
1552 elif t.merged:
1559 elif t.merged:
1553 madd(fn)
1560 madd(fn)
1554 elif t.added:
1561 elif t.added:
1555 aadd(fn)
1562 aadd(fn)
1556 elif t.removed:
1563 elif t.removed:
1557 radd(fn)
1564 radd(fn)
1558 elif t.tracked:
1565 elif t.tracked:
1559 if (
1566 if (
1560 size >= 0
1567 size >= 0
1561 and (
1568 and (
1562 (size != st.st_size and size != st.st_size & _rangemask)
1569 (size != st.st_size and size != st.st_size & _rangemask)
1563 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1570 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1564 )
1571 )
1565 or t.from_p2
1572 or t.from_p2
1566 or fn in copymap
1573 or fn in copymap
1567 ):
1574 ):
1568 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1575 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1569 # issue6456: Size returned may be longer due to
1576 # issue6456: Size returned may be longer due to
1570 # encryption on EXT-4 fscrypt, undecided.
1577 # encryption on EXT-4 fscrypt, undecided.
1571 ladd(fn)
1578 ladd(fn)
1572 else:
1579 else:
1573 madd(fn)
1580 madd(fn)
1574 elif (
1581 elif (
1575 time != st[stat.ST_MTIME]
1582 time != st[stat.ST_MTIME]
1576 and time != st[stat.ST_MTIME] & _rangemask
1583 and time != st[stat.ST_MTIME] & _rangemask
1577 ):
1584 ):
1578 ladd(fn)
1585 ladd(fn)
1579 elif st[stat.ST_MTIME] == lastnormaltime:
1586 elif st[stat.ST_MTIME] == lastnormaltime:
1580 # fn may have just been marked as normal and it may have
1587 # fn may have just been marked as normal and it may have
1581 # changed in the same second without changing its size.
1588 # changed in the same second without changing its size.
1582 # This can happen if we quickly do multiple commits.
1589 # This can happen if we quickly do multiple commits.
1583 # Force lookup, so we don't miss such a racy file change.
1590 # Force lookup, so we don't miss such a racy file change.
1584 ladd(fn)
1591 ladd(fn)
1585 elif listclean:
1592 elif listclean:
1586 cadd(fn)
1593 cadd(fn)
1587 status = scmutil.status(
1594 status = scmutil.status(
1588 modified, added, removed, deleted, unknown, ignored, clean
1595 modified, added, removed, deleted, unknown, ignored, clean
1589 )
1596 )
1590 return (lookup, status)
1597 return (lookup, status)
1591
1598
1592 def matches(self, match):
1599 def matches(self, match):
1593 """
1600 """
1594 return files in the dirstate (in whatever state) filtered by match
1601 return files in the dirstate (in whatever state) filtered by match
1595 """
1602 """
1596 dmap = self._map
1603 dmap = self._map
1597 if rustmod is not None:
1604 if rustmod is not None:
1598 dmap = self._map._rustmap
1605 dmap = self._map._rustmap
1599
1606
1600 if match.always():
1607 if match.always():
1601 return dmap.keys()
1608 return dmap.keys()
1602 files = match.files()
1609 files = match.files()
1603 if match.isexact():
1610 if match.isexact():
1604 # fast path -- filter the other way around, since typically files is
1611 # fast path -- filter the other way around, since typically files is
1605 # much smaller than dmap
1612 # much smaller than dmap
1606 return [f for f in files if f in dmap]
1613 return [f for f in files if f in dmap]
1607 if match.prefix() and all(fn in dmap for fn in files):
1614 if match.prefix() and all(fn in dmap for fn in files):
1608 # fast path -- all the values are known to be files, so just return
1615 # fast path -- all the values are known to be files, so just return
1609 # that
1616 # that
1610 return list(files)
1617 return list(files)
1611 return [f for f in dmap if match(f)]
1618 return [f for f in dmap if match(f)]
1612
1619
1613 def _actualfilename(self, tr):
1620 def _actualfilename(self, tr):
1614 if tr:
1621 if tr:
1615 return self._pendingfilename
1622 return self._pendingfilename
1616 else:
1623 else:
1617 return self._filename
1624 return self._filename
1618
1625
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` is the active transaction, or None.  ``backupname`` is the
        vfs-relative name the backup is written under; it must differ from
        the live dirstate file.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # remove any stale backup before creating the new one
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1657
1664
1658 def restorebackup(self, tr, backupname):
1665 def restorebackup(self, tr, backupname):
1659 '''Restore dirstate by backup file'''
1666 '''Restore dirstate by backup file'''
1660 # this "invalidate()" prevents "wlock.release()" from writing
1667 # this "invalidate()" prevents "wlock.release()" from writing
1661 # changes of dirstate out after restoring from backup file
1668 # changes of dirstate out after restoring from backup file
1662 self.invalidate()
1669 self.invalidate()
1663 filename = self._actualfilename(tr)
1670 filename = self._actualfilename(tr)
1664 o = self._opener
1671 o = self._opener
1665 if util.samefile(o.join(backupname), o.join(filename)):
1672 if util.samefile(o.join(backupname), o.join(filename)):
1666 o.unlink(backupname)
1673 o.unlink(backupname)
1667 else:
1674 else:
1668 o.rename(backupname, filename, checkambig=True)
1675 o.rename(backupname, filename, checkambig=True)
1669
1676
1670 def clearbackup(self, tr, backupname):
1677 def clearbackup(self, tr, backupname):
1671 '''Clear backup file'''
1678 '''Clear backup file'''
1672 self._opener.unlink(backupname)
1679 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now