##// END OF EJS Templates
dirstate: deprecate the `normallookup` method in all cases...
marmoute -
r48541:9f19d9f2 default
parent child Browse files
Show More
@@ -1,1714 +1,1732 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """A `filecache` variant that watches files stored under ``.hg/``."""

    def join(self, obj, fname):
        # Resolve the watched file relative to the repository's .hg opener.
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """A `filecache` variant that watches files at the working-copy root."""

    def join(self, obj, fname):
        # Resolve the watched file relative to the repository root.
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator asserting `func` runs inside a `parentchange` context.

    Raises ProgrammingError when the wrapped method is invoked while no
    parent change is pending on the dirstate.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            raise error.ProgrammingError(msg % func.__name__)
        return func(self, *args, **kwargs)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator asserting `func` runs outside a `parentchange` context.

    Raises ProgrammingError when the wrapped method is invoked while a
    parent change is pending on the dirstate.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            raise error.ProgrammingError(msg % func.__name__)
        return func(self, *args, **kwargs)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # number of currently open `parentchange` contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # The assignment replaces this property with a plain instance
        # attribute, so later accesses skip the property machinery.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181
181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194
194
    @repocache(b'branch')
    def _branch(self):
        # Read the current branch name from `.hg/branch`; a missing or
        # empty file means the default branch.
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            # Only a missing file is acceptable; re-raise anything else.
            if inst.errno != errno.ENOENT:
                raise
            return b"default"
203
203
    @property
    def _pl(self):
        # The (p1, p2) parent nodes as recorded in the dirstate map.
        return self._map.parents()

    def hasdir(self, d):
        """Return True if `d` is a directory holding tracked files."""
        return self._map.hastrackeddir(d)

    @rootcache(b'.hgignore')
    def _ignore(self):
        # Build the ignore matcher from every configured ignore file.
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        # True when paths should be shown with '/' although the native
        # separator differs (controlled by the ui.slash config knob).
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # Whether the filesystem under the repo root supports symlinks.
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # Whether the filesystem under the repo root honours the exec bit.
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed via `.hg`).
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240
240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a tracked path to its flags.

        The returned function yields b'l' for symlinks, b'x' for
        executables and b'' otherwise.  When the filesystem cannot express
        symlinks or the exec bit, the missing information is taken from
        the fallback function produced by `buildfallback()`.
        """
        if self._checklink and self._checkexec:
            # Filesystem supports both: read flags directly from disk.

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # Symlinks come from disk, exec bit from the fallback.

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # Exec bit comes from disk, symlinks from the fallback.

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # Neither is supported: rely entirely on the fallback.
            return fallback
280
280
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()

    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # cwd is inside the repo: return the repo-relative portion
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition
        ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. we should
        consider migrating all user of this to going through the dirstate entry
        instead.
        """
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        # unknown files report the b'?' state
        return b'?'

    def __contains__(self, key):
        """Return True if the dirstate has an entry for `key`."""
        return key in self._map

    def __iter__(self):
        """Iterate over the known file names in sorted order."""
        return iter(sorted(self._map))

    def items(self):
        """Iterate over (filename, entry) pairs of the dirstate map."""
        return pycompat.iteritems(self._map)

    # Python 2 spelling kept as an alias for compatibility.
    iteritems = items

    def directories(self):
        # Delegate to the dirstate map's directory listing.
        return self._map.directories()

    def parents(self):
        """Return both parent nodes, run through `self._validate`."""
        return [self._validate(p) for p in self._pl]
352
352
    def p1(self):
        """Return the validated first parent node."""
        return self._validate(self._pl[0])

    def p2(self):
        """Return the validated second parent node."""
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # a non-null second parent is what defines the merge state
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        """Return the current branch name in local encoding."""
        return encoding.tolocal(self._branch)
366
366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for later callbacks
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # moving from a merge (two parents) back to a single parent
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414
414
    def setbranch(self, branch):
        """Record `branch` (local encoding) as the current branch.

        Updates both the cached `_branch` value and the `.hg/branch` file.
        """
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # discard the partially written temp file before re-raising
            f.discard()
            raise
430
430
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so they are recomputed on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
446
446
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            # an existing copy record was removed; remember dest was touched
            self._updatedfiles.add(dest)

    def copied(self, file):
        """Return the copy source recorded for `file`, or None."""
        return self._map.copymap.get(file, None)

    def copies(self):
        """Return the full copy map (destination -> source)."""
        return self._map.copymap
464
464
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True the file was previously untracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            # completely unknown file: start tracking it
            self._add(filename)
            return True
        elif not entry.tracked:
            self._normallookup(filename)
            return True
        # XXX This is probably overkill for more case, but we need this to
        # fully replace the `normallookup` call with `set_tracked` one.
        # Consider smoothing this in the future.
        self.set_possibly_dirty(filename)
        return False
486
486
487 @requires_no_parents_change
487 @requires_no_parents_change
488 def set_untracked(self, filename):
488 def set_untracked(self, filename):
489 """a "public" method for generic code to mark a file as untracked
489 """a "public" method for generic code to mark a file as untracked
490
490
491 This function is to be called outside of "update/merge" case. For
491 This function is to be called outside of "update/merge" case. For
492 example by a command like `hg remove X`.
492 example by a command like `hg remove X`.
493
493
494 return True the file was previously tracked, False otherwise.
494 return True the file was previously tracked, False otherwise.
495 """
495 """
496 entry = self._map.get(filename)
496 entry = self._map.get(filename)
497 if entry is None:
497 if entry is None:
498 return False
498 return False
499 elif entry.added:
499 elif entry.added:
500 self._drop(filename)
500 self._drop(filename)
501 return True
501 return True
502 else:
502 else:
503 self._remove(filename)
503 self._remove(filename)
504 return True
504 return True
505
505
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        self._updatedfiles.add(filename)
        self._normal(filename, parentfiledata=parentfiledata)

    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._updatedfiles.add(filename)
        self._map.set_possibly_dirty(filename)
519
519
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            pass
        else:
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
581
581
582 @requires_parents_change
582 @requires_parents_change
583 def update_file(
583 def update_file(
584 self,
584 self,
585 filename,
585 filename,
586 wc_tracked,
586 wc_tracked,
587 p1_tracked,
587 p1_tracked,
588 p2_tracked=False,
588 p2_tracked=False,
589 merged=False,
589 merged=False,
590 clean_p1=False,
590 clean_p1=False,
591 clean_p2=False,
591 clean_p2=False,
592 possibly_dirty=False,
592 possibly_dirty=False,
593 parentfiledata=None,
593 parentfiledata=None,
594 ):
594 ):
595 """update the information about a file in the dirstate
595 """update the information about a file in the dirstate
596
596
597 This is to be called when the direstates parent changes to keep track
597 This is to be called when the direstates parent changes to keep track
598 of what is the file situation in regards to the working copy and its parent.
598 of what is the file situation in regards to the working copy and its parent.
599
599
600 This function must be called within a `dirstate.parentchange` context.
600 This function must be called within a `dirstate.parentchange` context.
601
601
602 note: the API is at an early stage and we might need to ajust it
602 note: the API is at an early stage and we might need to ajust it
603 depending of what information ends up being relevant and useful to
603 depending of what information ends up being relevant and useful to
604 other processing.
604 other processing.
605 """
605 """
606 if merged and (clean_p1 or clean_p2):
606 if merged and (clean_p1 or clean_p2):
607 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
607 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
608 raise error.ProgrammingError(msg)
608 raise error.ProgrammingError(msg)
609
609
610 # note: I do not think we need to double check name clash here since we
610 # note: I do not think we need to double check name clash here since we
611 # are in a update/merge case that should already have taken care of
611 # are in a update/merge case that should already have taken care of
612 # this. The test agrees
612 # this. The test agrees
613
613
614 self._dirty = True
614 self._dirty = True
615 self._updatedfiles.add(filename)
615 self._updatedfiles.add(filename)
616
616
617 need_parent_file_data = (
617 need_parent_file_data = (
618 not (possibly_dirty or clean_p2 or merged)
618 not (possibly_dirty or clean_p2 or merged)
619 and wc_tracked
619 and wc_tracked
620 and p1_tracked
620 and p1_tracked
621 )
621 )
622
622
623 # this mean we are doing call for file we do not really care about the
623 # this mean we are doing call for file we do not really care about the
624 # data (eg: added or removed), however this should be a minor overhead
624 # data (eg: added or removed), however this should be a minor overhead
625 # compared to the overall update process calling this.
625 # compared to the overall update process calling this.
626 if need_parent_file_data:
626 if need_parent_file_data:
627 if parentfiledata is None:
627 if parentfiledata is None:
628 parentfiledata = self._get_filedata(filename)
628 parentfiledata = self._get_filedata(filename)
629 mtime = parentfiledata[2]
629 mtime = parentfiledata[2]
630
630
631 if mtime > self._lastnormaltime:
631 if mtime > self._lastnormaltime:
632 # Remember the most recent modification timeslot for
632 # Remember the most recent modification timeslot for
633 # status(), to make sure we won't miss future
633 # status(), to make sure we won't miss future
634 # size-preserving file content modifications that happen
634 # size-preserving file content modifications that happen
635 # within the same timeslot.
635 # within the same timeslot.
636 self._lastnormaltime = mtime
636 self._lastnormaltime = mtime
637
637
638 self._map.reset_state(
638 self._map.reset_state(
639 filename,
639 filename,
640 wc_tracked,
640 wc_tracked,
641 p1_tracked,
641 p1_tracked,
642 p2_tracked=p2_tracked,
642 p2_tracked=p2_tracked,
643 merged=merged,
643 merged=merged,
644 clean_p1=clean_p1,
644 clean_p1=clean_p1,
645 clean_p2=clean_p2,
645 clean_p2=clean_p2,
646 possibly_dirty=possibly_dirty,
646 possibly_dirty=possibly_dirty,
647 parentfiledata=parentfiledata,
647 parentfiledata=parentfiledata,
648 )
648 )
649 if (
649 if (
650 parentfiledata is not None
650 parentfiledata is not None
651 and parentfiledata[2] > self._lastnormaltime
651 and parentfiledata[2] > self._lastnormaltime
652 ):
652 ):
653 # Remember the most recent modification timeslot for status(),
653 # Remember the most recent modification timeslot for status(),
654 # to make sure we won't miss future size-preserving file content
654 # to make sure we won't miss future size-preserving file content
655 # modifications that happen within the same timeslot.
655 # modifications that happen within the same timeslot.
656 self._lastnormaltime = parentfiledata[2]
656 self._lastnormaltime = parentfiledata[2]
657
657
658 def _addpath(
658 def _addpath(
659 self,
659 self,
660 f,
660 f,
661 mode=0,
661 mode=0,
662 size=None,
662 size=None,
663 mtime=None,
663 mtime=None,
664 added=False,
664 added=False,
665 merged=False,
665 merged=False,
666 from_p2=False,
666 from_p2=False,
667 possibly_dirty=False,
667 possibly_dirty=False,
668 ):
668 ):
669 entry = self._map.get(f)
669 entry = self._map.get(f)
670 if added or entry is not None and entry.removed:
670 if added or entry is not None and entry.removed:
671 scmutil.checkfilename(f)
671 scmutil.checkfilename(f)
672 if self._map.hastrackeddir(f):
672 if self._map.hastrackeddir(f):
673 msg = _(b'directory %r already in dirstate')
673 msg = _(b'directory %r already in dirstate')
674 msg %= pycompat.bytestr(f)
674 msg %= pycompat.bytestr(f)
675 raise error.Abort(msg)
675 raise error.Abort(msg)
676 # shadows
676 # shadows
677 for d in pathutil.finddirs(f):
677 for d in pathutil.finddirs(f):
678 if self._map.hastrackeddir(d):
678 if self._map.hastrackeddir(d):
679 break
679 break
680 entry = self._map.get(d)
680 entry = self._map.get(d)
681 if entry is not None and not entry.removed:
681 if entry is not None and not entry.removed:
682 msg = _(b'file %r in dirstate clashes with %r')
682 msg = _(b'file %r in dirstate clashes with %r')
683 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
683 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
684 raise error.Abort(msg)
684 raise error.Abort(msg)
685 self._dirty = True
685 self._dirty = True
686 self._updatedfiles.add(f)
686 self._updatedfiles.add(f)
687 self._map.addfile(
687 self._map.addfile(
688 f,
688 f,
689 mode=mode,
689 mode=mode,
690 size=size,
690 size=size,
691 mtime=mtime,
691 mtime=mtime,
692 added=added,
692 added=added,
693 merged=merged,
693 merged=merged,
694 from_p2=from_p2,
694 from_p2=from_p2,
695 possibly_dirty=possibly_dirty,
695 possibly_dirty=possibly_dirty,
696 )
696 )
697
697
698 def _get_filedata(self, filename):
698 def _get_filedata(self, filename):
699 """returns"""
699 """returns"""
700 s = os.lstat(self._join(filename))
700 s = os.lstat(self._join(filename))
701 mode = s.st_mode
701 mode = s.st_mode
702 size = s.st_size
702 size = s.st_size
703 mtime = s[stat.ST_MTIME]
703 mtime = s[stat.ST_MTIME]
704 return (mode, size, mtime)
704 return (mode, size, mtime)
705
705
706 def normal(self, f, parentfiledata=None):
706 def normal(self, f, parentfiledata=None):
707 """Mark a file normal and clean.
707 """Mark a file normal and clean.
708
708
709 parentfiledata: (mode, size, mtime) of the clean file
709 parentfiledata: (mode, size, mtime) of the clean file
710
710
711 parentfiledata should be computed from memory (for mode,
711 parentfiledata should be computed from memory (for mode,
712 size), as or close as possible from the point where we
712 size), as or close as possible from the point where we
713 determined the file was clean, to limit the risk of the
713 determined the file was clean, to limit the risk of the
714 file having been changed by an external process between the
714 file having been changed by an external process between the
715 moment where the file was determined to be clean and now."""
715 moment where the file was determined to be clean and now."""
716 if self.pendingparentchange():
716 if self.pendingparentchange():
717 util.nouideprecwarn(
717 util.nouideprecwarn(
718 b"do not use `normal` inside of update/merge context."
718 b"do not use `normal` inside of update/merge context."
719 b" Use `update_file` or `update_file_p1`",
719 b" Use `update_file` or `update_file_p1`",
720 b'6.0',
720 b'6.0',
721 stacklevel=2,
721 stacklevel=2,
722 )
722 )
723 else:
723 else:
724 util.nouideprecwarn(
724 util.nouideprecwarn(
725 b"do not use `normal` outside of update/merge context."
725 b"do not use `normal` outside of update/merge context."
726 b" Use `set_tracked`",
726 b" Use `set_tracked`",
727 b'6.0',
727 b'6.0',
728 stacklevel=2,
728 stacklevel=2,
729 )
729 )
730 self._normal(f, parentfiledata=parentfiledata)
730 self._normal(f, parentfiledata=parentfiledata)
731
731
732 def _normal(self, f, parentfiledata=None):
732 def _normal(self, f, parentfiledata=None):
733 if parentfiledata:
733 if parentfiledata:
734 (mode, size, mtime) = parentfiledata
734 (mode, size, mtime) = parentfiledata
735 else:
735 else:
736 (mode, size, mtime) = self._get_filedata(f)
736 (mode, size, mtime) = self._get_filedata(f)
737 self._addpath(f, mode=mode, size=size, mtime=mtime)
737 self._addpath(f, mode=mode, size=size, mtime=mtime)
738 self._map.copymap.pop(f, None)
738 self._map.copymap.pop(f, None)
739 if f in self._map.nonnormalset:
739 if f in self._map.nonnormalset:
740 self._map.nonnormalset.remove(f)
740 self._map.nonnormalset.remove(f)
741 if mtime > self._lastnormaltime:
741 if mtime > self._lastnormaltime:
742 # Remember the most recent modification timeslot for status(),
742 # Remember the most recent modification timeslot for status(),
743 # to make sure we won't miss future size-preserving file content
743 # to make sure we won't miss future size-preserving file content
744 # modifications that happen within the same timeslot.
744 # modifications that happen within the same timeslot.
745 self._lastnormaltime = mtime
745 self._lastnormaltime = mtime
746
746
747 def normallookup(self, f):
747 def normallookup(self, f):
748 '''Mark a file normal, but possibly dirty.'''
748 '''Mark a file normal, but possibly dirty.'''
749 if self.pendingparentchange():
750 util.nouideprecwarn(
751 b"do not use `normallookup` inside of update/merge context."
752 b" Use `update_file` or `update_file_p1`",
753 b'6.0',
754 stacklevel=2,
755 )
756 else:
757 util.nouideprecwarn(
758 b"do not use `normallookup` outside of update/merge context."
759 b" Use `set_possibly_dirty` or `set_tracked`",
760 b'6.0',
761 stacklevel=2,
762 )
763 self._normallookup(f)
764
765 def _normallookup(self, f):
766 '''Mark a file normal, but possibly dirty.'''
749 if self.in_merge:
767 if self.in_merge:
750 # if there is a merge going on and the file was either
768 # if there is a merge going on and the file was either
751 # "merged" or coming from other parent (-2) before
769 # "merged" or coming from other parent (-2) before
752 # being removed, restore that state.
770 # being removed, restore that state.
753 entry = self._map.get(f)
771 entry = self._map.get(f)
754 if entry is not None:
772 if entry is not None:
755 # XXX this should probably be dealt with a a lower level
773 # XXX this should probably be dealt with a a lower level
756 # (see `merged_removed` and `from_p2_removed`)
774 # (see `merged_removed` and `from_p2_removed`)
757 if entry.merged_removed or entry.from_p2_removed:
775 if entry.merged_removed or entry.from_p2_removed:
758 source = self._map.copymap.get(f)
776 source = self._map.copymap.get(f)
759 if entry.merged_removed:
777 if entry.merged_removed:
760 self.merge(f)
778 self.merge(f)
761 elif entry.from_p2_removed:
779 elif entry.from_p2_removed:
762 self.otherparent(f)
780 self.otherparent(f)
763 if source is not None:
781 if source is not None:
764 self.copy(source, f)
782 self.copy(source, f)
765 return
783 return
766 elif entry.merged or entry.from_p2:
784 elif entry.merged or entry.from_p2:
767 return
785 return
768 self._addpath(f, possibly_dirty=True)
786 self._addpath(f, possibly_dirty=True)
769 self._map.copymap.pop(f, None)
787 self._map.copymap.pop(f, None)
770
788
771 def otherparent(self, f):
789 def otherparent(self, f):
772 '''Mark as coming from the other parent, always dirty.'''
790 '''Mark as coming from the other parent, always dirty.'''
773 if not self.in_merge:
791 if not self.in_merge:
774 msg = _(b"setting %r to other parent only allowed in merges") % f
792 msg = _(b"setting %r to other parent only allowed in merges") % f
775 raise error.Abort(msg)
793 raise error.Abort(msg)
776 entry = self._map.get(f)
794 entry = self._map.get(f)
777 if entry is not None and entry.tracked:
795 if entry is not None and entry.tracked:
778 # merge-like
796 # merge-like
779 self._addpath(f, merged=True)
797 self._addpath(f, merged=True)
780 else:
798 else:
781 # add-like
799 # add-like
782 self._addpath(f, from_p2=True)
800 self._addpath(f, from_p2=True)
783 self._map.copymap.pop(f, None)
801 self._map.copymap.pop(f, None)
784
802
785 def add(self, f):
803 def add(self, f):
786 '''Mark a file added.'''
804 '''Mark a file added.'''
787 if not self.pendingparentchange():
805 if not self.pendingparentchange():
788 util.nouideprecwarn(
806 util.nouideprecwarn(
789 b"do not use `add` outside of update/merge context."
807 b"do not use `add` outside of update/merge context."
790 b" Use `set_tracked`",
808 b" Use `set_tracked`",
791 b'6.0',
809 b'6.0',
792 stacklevel=2,
810 stacklevel=2,
793 )
811 )
794 self._add(f)
812 self._add(f)
795
813
796 def _add(self, filename):
814 def _add(self, filename):
797 """internal function to mark a file as added"""
815 """internal function to mark a file as added"""
798 self._addpath(filename, added=True)
816 self._addpath(filename, added=True)
799 self._map.copymap.pop(filename, None)
817 self._map.copymap.pop(filename, None)
800
818
801 def remove(self, f):
819 def remove(self, f):
802 '''Mark a file removed'''
820 '''Mark a file removed'''
803 if self.pendingparentchange():
821 if self.pendingparentchange():
804 util.nouideprecwarn(
822 util.nouideprecwarn(
805 b"do not use `remove` insde of update/merge context."
823 b"do not use `remove` insde of update/merge context."
806 b" Use `update_file` or `update_file_p1`",
824 b" Use `update_file` or `update_file_p1`",
807 b'6.0',
825 b'6.0',
808 stacklevel=2,
826 stacklevel=2,
809 )
827 )
810 else:
828 else:
811 util.nouideprecwarn(
829 util.nouideprecwarn(
812 b"do not use `remove` outside of update/merge context."
830 b"do not use `remove` outside of update/merge context."
813 b" Use `set_untracked`",
831 b" Use `set_untracked`",
814 b'6.0',
832 b'6.0',
815 stacklevel=2,
833 stacklevel=2,
816 )
834 )
817 self._remove(f)
835 self._remove(f)
818
836
819 def _remove(self, filename):
837 def _remove(self, filename):
820 """internal function to mark a file removed"""
838 """internal function to mark a file removed"""
821 self._dirty = True
839 self._dirty = True
822 self._updatedfiles.add(filename)
840 self._updatedfiles.add(filename)
823 self._map.removefile(filename, in_merge=self.in_merge)
841 self._map.removefile(filename, in_merge=self.in_merge)
824
842
825 def merge(self, f):
843 def merge(self, f):
826 '''Mark a file merged.'''
844 '''Mark a file merged.'''
827 if not self.in_merge:
845 if not self.in_merge:
828 return self.normallookup(f)
846 return self._normallookup(f)
829 return self.otherparent(f)
847 return self.otherparent(f)
830
848
831 def drop(self, f):
849 def drop(self, f):
832 '''Drop a file from the dirstate'''
850 '''Drop a file from the dirstate'''
833 if not self.pendingparentchange():
851 if not self.pendingparentchange():
834 util.nouideprecwarn(
852 util.nouideprecwarn(
835 b"do not use `drop` outside of update/merge context."
853 b"do not use `drop` outside of update/merge context."
836 b" Use `set_untracked`",
854 b" Use `set_untracked`",
837 b'6.0',
855 b'6.0',
838 stacklevel=2,
856 stacklevel=2,
839 )
857 )
840 self._drop(f)
858 self._drop(f)
841
859
842 def _drop(self, filename):
860 def _drop(self, filename):
843 """internal function to drop a file from the dirstate"""
861 """internal function to drop a file from the dirstate"""
844 if self._map.dropfile(filename):
862 if self._map.dropfile(filename):
845 self._dirty = True
863 self._dirty = True
846 self._updatedfiles.add(filename)
864 self._updatedfiles.add(filename)
847 self._map.copymap.pop(filename, None)
865 self._map.copymap.pop(filename, None)
848
866
849 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
867 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
850 if exists is None:
868 if exists is None:
851 exists = os.path.lexists(os.path.join(self._root, path))
869 exists = os.path.lexists(os.path.join(self._root, path))
852 if not exists:
870 if not exists:
853 # Maybe a path component exists
871 # Maybe a path component exists
854 if not ignoremissing and b'/' in path:
872 if not ignoremissing and b'/' in path:
855 d, f = path.rsplit(b'/', 1)
873 d, f = path.rsplit(b'/', 1)
856 d = self._normalize(d, False, ignoremissing, None)
874 d = self._normalize(d, False, ignoremissing, None)
857 folded = d + b"/" + f
875 folded = d + b"/" + f
858 else:
876 else:
859 # No path components, preserve original case
877 # No path components, preserve original case
860 folded = path
878 folded = path
861 else:
879 else:
862 # recursively normalize leading directory components
880 # recursively normalize leading directory components
863 # against dirstate
881 # against dirstate
864 if b'/' in normed:
882 if b'/' in normed:
865 d, f = normed.rsplit(b'/', 1)
883 d, f = normed.rsplit(b'/', 1)
866 d = self._normalize(d, False, ignoremissing, True)
884 d = self._normalize(d, False, ignoremissing, True)
867 r = self._root + b"/" + d
885 r = self._root + b"/" + d
868 folded = d + b"/" + util.fspath(f, r)
886 folded = d + b"/" + util.fspath(f, r)
869 else:
887 else:
870 folded = util.fspath(normed, self._root)
888 folded = util.fspath(normed, self._root)
871 storemap[normed] = folded
889 storemap[normed] = folded
872
890
873 return folded
891 return folded
874
892
875 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
893 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
876 normed = util.normcase(path)
894 normed = util.normcase(path)
877 folded = self._map.filefoldmap.get(normed, None)
895 folded = self._map.filefoldmap.get(normed, None)
878 if folded is None:
896 if folded is None:
879 if isknown:
897 if isknown:
880 folded = path
898 folded = path
881 else:
899 else:
882 folded = self._discoverpath(
900 folded = self._discoverpath(
883 path, normed, ignoremissing, exists, self._map.filefoldmap
901 path, normed, ignoremissing, exists, self._map.filefoldmap
884 )
902 )
885 return folded
903 return folded
886
904
887 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
905 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
888 normed = util.normcase(path)
906 normed = util.normcase(path)
889 folded = self._map.filefoldmap.get(normed, None)
907 folded = self._map.filefoldmap.get(normed, None)
890 if folded is None:
908 if folded is None:
891 folded = self._map.dirfoldmap.get(normed, None)
909 folded = self._map.dirfoldmap.get(normed, None)
892 if folded is None:
910 if folded is None:
893 if isknown:
911 if isknown:
894 folded = path
912 folded = path
895 else:
913 else:
896 # store discovered result in dirfoldmap so that future
914 # store discovered result in dirfoldmap so that future
897 # normalizefile calls don't start matching directories
915 # normalizefile calls don't start matching directories
898 folded = self._discoverpath(
916 folded = self._discoverpath(
899 path, normed, ignoremissing, exists, self._map.dirfoldmap
917 path, normed, ignoremissing, exists, self._map.dirfoldmap
900 )
918 )
901 return folded
919 return folded
902
920
903 def normalize(self, path, isknown=False, ignoremissing=False):
921 def normalize(self, path, isknown=False, ignoremissing=False):
904 """
922 """
905 normalize the case of a pathname when on a casefolding filesystem
923 normalize the case of a pathname when on a casefolding filesystem
906
924
907 isknown specifies whether the filename came from walking the
925 isknown specifies whether the filename came from walking the
908 disk, to avoid extra filesystem access.
926 disk, to avoid extra filesystem access.
909
927
910 If ignoremissing is True, missing path are returned
928 If ignoremissing is True, missing path are returned
911 unchanged. Otherwise, we try harder to normalize possibly
929 unchanged. Otherwise, we try harder to normalize possibly
912 existing path components.
930 existing path components.
913
931
914 The normalized case is determined based on the following precedence:
932 The normalized case is determined based on the following precedence:
915
933
916 - version of name already stored in the dirstate
934 - version of name already stored in the dirstate
917 - version of name stored on disk
935 - version of name stored on disk
918 - version provided via command arguments
936 - version provided via command arguments
919 """
937 """
920
938
921 if self._checkcase:
939 if self._checkcase:
922 return self._normalize(path, isknown, ignoremissing)
940 return self._normalize(path, isknown, ignoremissing)
923 return path
941 return path
924
942
925 def clear(self):
943 def clear(self):
926 self._map.clear()
944 self._map.clear()
927 self._lastnormaltime = 0
945 self._lastnormaltime = 0
928 self._updatedfiles.clear()
946 self._updatedfiles.clear()
929 self._dirty = True
947 self._dirty = True
930
948
931 def rebuild(self, parent, allfiles, changedfiles=None):
949 def rebuild(self, parent, allfiles, changedfiles=None):
932 if changedfiles is None:
950 if changedfiles is None:
933 # Rebuild entire dirstate
951 # Rebuild entire dirstate
934 to_lookup = allfiles
952 to_lookup = allfiles
935 to_drop = []
953 to_drop = []
936 lastnormaltime = self._lastnormaltime
954 lastnormaltime = self._lastnormaltime
937 self.clear()
955 self.clear()
938 self._lastnormaltime = lastnormaltime
956 self._lastnormaltime = lastnormaltime
939 elif len(changedfiles) < 10:
957 elif len(changedfiles) < 10:
940 # Avoid turning allfiles into a set, which can be expensive if it's
958 # Avoid turning allfiles into a set, which can be expensive if it's
941 # large.
959 # large.
942 to_lookup = []
960 to_lookup = []
943 to_drop = []
961 to_drop = []
944 for f in changedfiles:
962 for f in changedfiles:
945 if f in allfiles:
963 if f in allfiles:
946 to_lookup.append(f)
964 to_lookup.append(f)
947 else:
965 else:
948 to_drop.append(f)
966 to_drop.append(f)
949 else:
967 else:
950 changedfilesset = set(changedfiles)
968 changedfilesset = set(changedfiles)
951 to_lookup = changedfilesset & set(allfiles)
969 to_lookup = changedfilesset & set(allfiles)
952 to_drop = changedfilesset - to_lookup
970 to_drop = changedfilesset - to_lookup
953
971
954 if self._origpl is None:
972 if self._origpl is None:
955 self._origpl = self._pl
973 self._origpl = self._pl
956 self._map.setparents(parent, self._nodeconstants.nullid)
974 self._map.setparents(parent, self._nodeconstants.nullid)
957
975
958 for f in to_lookup:
976 for f in to_lookup:
959 self.normallookup(f)
977 self._normallookup(f)
960 for f in to_drop:
978 for f in to_drop:
961 self._drop(f)
979 self._drop(f)
962
980
963 self._dirty = True
981 self._dirty = True
964
982
965 def identity(self):
983 def identity(self):
966 """Return identity of dirstate itself to detect changing in storage
984 """Return identity of dirstate itself to detect changing in storage
967
985
968 If identity of previous dirstate is equal to this, writing
986 If identity of previous dirstate is equal to this, writing
969 changes based on the former dirstate out can keep consistency.
987 changes based on the former dirstate out can keep consistency.
970 """
988 """
971 return self._map.identity
989 return self._map.identity
972
990
973 def write(self, tr):
991 def write(self, tr):
974 if not self._dirty:
992 if not self._dirty:
975 return
993 return
976
994
977 filename = self._filename
995 filename = self._filename
978 if tr:
996 if tr:
979 # 'dirstate.write()' is not only for writing in-memory
997 # 'dirstate.write()' is not only for writing in-memory
980 # changes out, but also for dropping ambiguous timestamp.
998 # changes out, but also for dropping ambiguous timestamp.
981 # delayed writing re-raise "ambiguous timestamp issue".
999 # delayed writing re-raise "ambiguous timestamp issue".
982 # See also the wiki page below for detail:
1000 # See also the wiki page below for detail:
983 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
1001 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
984
1002
985 # emulate dropping timestamp in 'parsers.pack_dirstate'
1003 # emulate dropping timestamp in 'parsers.pack_dirstate'
986 now = _getfsnow(self._opener)
1004 now = _getfsnow(self._opener)
987 self._map.clearambiguoustimes(self._updatedfiles, now)
1005 self._map.clearambiguoustimes(self._updatedfiles, now)
988
1006
989 # emulate that all 'dirstate.normal' results are written out
1007 # emulate that all 'dirstate.normal' results are written out
990 self._lastnormaltime = 0
1008 self._lastnormaltime = 0
991 self._updatedfiles.clear()
1009 self._updatedfiles.clear()
992
1010
993 # delay writing in-memory changes out
1011 # delay writing in-memory changes out
994 tr.addfilegenerator(
1012 tr.addfilegenerator(
995 b'dirstate',
1013 b'dirstate',
996 (self._filename,),
1014 (self._filename,),
997 lambda f: self._writedirstate(tr, f),
1015 lambda f: self._writedirstate(tr, f),
998 location=b'plain',
1016 location=b'plain',
999 )
1017 )
1000 return
1018 return
1001
1019
1002 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
1020 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
1003 self._writedirstate(tr, st)
1021 self._writedirstate(tr, st)
1004
1022
1005 def addparentchangecallback(self, category, callback):
1023 def addparentchangecallback(self, category, callback):
1006 """add a callback to be called when the wd parents are changed
1024 """add a callback to be called when the wd parents are changed
1007
1025
1008 Callback will be called with the following arguments:
1026 Callback will be called with the following arguments:
1009 dirstate, (oldp1, oldp2), (newp1, newp2)
1027 dirstate, (oldp1, oldp2), (newp1, newp2)
1010
1028
1011 Category is a unique identifier to allow overwriting an old callback
1029 Category is a unique identifier to allow overwriting an old callback
1012 with a newer callback.
1030 with a newer callback.
1013 """
1031 """
1014 self._plchangecallbacks[category] = callback
1032 self._plchangecallbacks[category] = callback
1015
1033
1016 def _writedirstate(self, tr, st):
1034 def _writedirstate(self, tr, st):
1017 # notify callbacks about parents change
1035 # notify callbacks about parents change
1018 if self._origpl is not None and self._origpl != self._pl:
1036 if self._origpl is not None and self._origpl != self._pl:
1019 for c, callback in sorted(
1037 for c, callback in sorted(
1020 pycompat.iteritems(self._plchangecallbacks)
1038 pycompat.iteritems(self._plchangecallbacks)
1021 ):
1039 ):
1022 callback(self, self._origpl, self._pl)
1040 callback(self, self._origpl, self._pl)
1023 self._origpl = None
1041 self._origpl = None
1024 # use the modification time of the newly created temporary file as the
1042 # use the modification time of the newly created temporary file as the
1025 # filesystem's notion of 'now'
1043 # filesystem's notion of 'now'
1026 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
1044 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
1027
1045
1028 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1046 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1029 # timestamp of each entries in dirstate, because of 'now > mtime'
1047 # timestamp of each entries in dirstate, because of 'now > mtime'
1030 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
1048 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
1031 if delaywrite > 0:
1049 if delaywrite > 0:
1032 # do we have any files to delay for?
1050 # do we have any files to delay for?
1033 for f, e in pycompat.iteritems(self._map):
1051 for f, e in pycompat.iteritems(self._map):
1034 if e.need_delay(now):
1052 if e.need_delay(now):
1035 import time # to avoid useless import
1053 import time # to avoid useless import
1036
1054
1037 # rather than sleep n seconds, sleep until the next
1055 # rather than sleep n seconds, sleep until the next
1038 # multiple of n seconds
1056 # multiple of n seconds
1039 clock = time.time()
1057 clock = time.time()
1040 start = int(clock) - (int(clock) % delaywrite)
1058 start = int(clock) - (int(clock) % delaywrite)
1041 end = start + delaywrite
1059 end = start + delaywrite
1042 time.sleep(end - clock)
1060 time.sleep(end - clock)
1043 now = end # trust our estimate that the end is near now
1061 now = end # trust our estimate that the end is near now
1044 break
1062 break
1045
1063
1046 self._map.write(tr, st, now)
1064 self._map.write(tr, st, now)
1047 self._lastnormaltime = 0
1065 self._lastnormaltime = 0
1048 self._dirty = False
1066 self._dirty = False
1049
1067
1050 def _dirignore(self, f):
1068 def _dirignore(self, f):
1051 if self._ignore(f):
1069 if self._ignore(f):
1052 return True
1070 return True
1053 for p in pathutil.finddirs(f):
1071 for p in pathutil.finddirs(f):
1054 if self._ignore(p):
1072 if self._ignore(p):
1055 return True
1073 return True
1056 return False
1074 return False
1057
1075
1058 def _ignorefiles(self):
1076 def _ignorefiles(self):
1059 files = []
1077 files = []
1060 if os.path.exists(self._join(b'.hgignore')):
1078 if os.path.exists(self._join(b'.hgignore')):
1061 files.append(self._join(b'.hgignore'))
1079 files.append(self._join(b'.hgignore'))
1062 for name, path in self._ui.configitems(b"ui"):
1080 for name, path in self._ui.configitems(b"ui"):
1063 if name == b'ignore' or name.startswith(b'ignore.'):
1081 if name == b'ignore' or name.startswith(b'ignore.'):
1064 # we need to use os.path.join here rather than self._join
1082 # we need to use os.path.join here rather than self._join
1065 # because path is arbitrary and user-specified
1083 # because path is arbitrary and user-specified
1066 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1084 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1067 return files
1085 return files
1068
1086
1069 def _ignorefileandline(self, f):
1087 def _ignorefileandline(self, f):
1070 files = collections.deque(self._ignorefiles())
1088 files = collections.deque(self._ignorefiles())
1071 visited = set()
1089 visited = set()
1072 while files:
1090 while files:
1073 i = files.popleft()
1091 i = files.popleft()
1074 patterns = matchmod.readpatternfile(
1092 patterns = matchmod.readpatternfile(
1075 i, self._ui.warn, sourceinfo=True
1093 i, self._ui.warn, sourceinfo=True
1076 )
1094 )
1077 for pattern, lineno, line in patterns:
1095 for pattern, lineno, line in patterns:
1078 kind, p = matchmod._patsplit(pattern, b'glob')
1096 kind, p = matchmod._patsplit(pattern, b'glob')
1079 if kind == b"subinclude":
1097 if kind == b"subinclude":
1080 if p not in visited:
1098 if p not in visited:
1081 files.append(p)
1099 files.append(p)
1082 continue
1100 continue
1083 m = matchmod.match(
1101 m = matchmod.match(
1084 self._root, b'', [], [pattern], warn=self._ui.warn
1102 self._root, b'', [], [pattern], warn=self._ui.warn
1085 )
1103 )
1086 if m(f):
1104 if m(f):
1087 return (i, lineno, line)
1105 return (i, lineno, line)
1088 visited.add(i)
1106 visited.add(i)
1089 return (None, -1, b"")
1107 return (None, -1, b"")
1090
1108
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # translate an unsupported stat mode into a human-readable message
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently-used attributes to locals for the loops below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        # on case-insensitive filesystems, non-exact matches need their
        # paths normalized to the on-disk case
        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; both lists are
        # sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1225
1243
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # select the ignore predicates: listing ignored files means nothing
        # is ignored; not listing unknown files means everything is "ignored"
        # so step 2 (directory recursion) can be dropped entirely
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # bind frequently-used attributes to locals for the hot loops below
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # depth-first traversal of the directories in `work`, filling
            # `results` in place
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # the subrepo and .hg sentinels were only there to stop the walk
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1413
1431
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status using the Rust extension.

        Returns the same ``(lookup, status)`` pair as ``status()``, where
        ``status`` is a ``scmutil.status`` instance.  May raise
        ``rustmod.FallbackError`` (propagated from ``rustmod.status``) when
        the Rust path cannot handle the request; the caller falls back to
        the Python implementation in that case.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            # disable Rayon parallelism entirely
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust code may have normalized entries; remember to write out
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            # warnings come in two shapes: (file, syntax) tuples for invalid
            # pattern syntax, and bare paths for unreadable pattern files
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1492
1510
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # NOTE: the ``ignored``/``clean``/``unknown`` parameters are saved
        # under list* names and the originals re-bound to result lists below
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # prefer the Rust fast path when it supports the request
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # fall through to the pure-Python implementation
                pass

        def noop(f):
            pass

        # bind the per-category append methods (or a noop for categories the
        # caller did not request) to locals for the hot loop below
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file on disk but not in the dirstate: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                # tracked but missing from disk
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    # same size but mtime differs: need to read the contents
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1633
1651
1634 def matches(self, match):
1652 def matches(self, match):
1635 """
1653 """
1636 return files in the dirstate (in whatever state) filtered by match
1654 return files in the dirstate (in whatever state) filtered by match
1637 """
1655 """
1638 dmap = self._map
1656 dmap = self._map
1639 if rustmod is not None:
1657 if rustmod is not None:
1640 dmap = self._map._rustmap
1658 dmap = self._map._rustmap
1641
1659
1642 if match.always():
1660 if match.always():
1643 return dmap.keys()
1661 return dmap.keys()
1644 files = match.files()
1662 files = match.files()
1645 if match.isexact():
1663 if match.isexact():
1646 # fast path -- filter the other way around, since typically files is
1664 # fast path -- filter the other way around, since typically files is
1647 # much smaller than dmap
1665 # much smaller than dmap
1648 return [f for f in files if f in dmap]
1666 return [f for f in files if f in dmap]
1649 if match.prefix() and all(fn in dmap for fn in files):
1667 if match.prefix() and all(fn in dmap for fn in files):
1650 # fast path -- all the values are known to be files, so just return
1668 # fast path -- all the values are known to be files, so just return
1651 # that
1669 # that
1652 return list(files)
1670 return list(files)
1653 return [f for f in dmap if match(f)]
1671 return [f for f in dmap if match(f)]
1654
1672
1655 def _actualfilename(self, tr):
1673 def _actualfilename(self, tr):
1656 if tr:
1674 if tr:
1657 return self._pendingfilename
1675 return self._pendingfilename
1658 else:
1676 else:
1659 return self._filename
1677 return self._filename
1660
1678
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` is the currently running transaction (may be None); it
        decides which on-disk file (pending or regular) holds the current
        state.  ``backupname`` is the vfs-relative name the copy is saved
        under; it must differ from the live dirstate filename.
        '''
        filename = self._actualfilename(tr)
        # backing the live file up onto itself would destroy it
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # remove any stale previous backup before creating the new one
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1699
1717
    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file

        Replaces the current on-disk dirstate (pending file when a
        transaction ``tr`` is running, the regular file otherwise) with
        the backup previously written by ``savebackup``, and drops any
        in-memory state so it gets reloaded from disk.
        '''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            # backup was hardlinked to the live file (see savebackup):
            # the content is already in place, just drop the extra name
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)
1711
1729
    def clearbackup(self, tr, backupname):
        '''Clear backup file

        Deletes the backup created by ``savebackup`` once it is no longer
        needed.  ``tr`` is accepted for interface symmetry with
        ``savebackup``/``restorebackup`` but is not used here.
        '''
        self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now