##// END OF EJS Templates
dirstate: deprecate `__getitem__` access...
marmoute -
r48919:dcd97b08 default
parent child Browse files
Show More
@@ -1,1546 +1,1548
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
# C implementation of the dirstate parse/pack primitives, when available.
parsers = policy.importmod('parsers')
# Rust implementation of the dirstate, when the extensions are built.
rustmod = policy.importrust('dirstate')

# dirstate-v2 is only supported when the Rust extensions are available.
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# Short local aliases for frequently used helpers.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = dirstatemap.DirstateItem
49
49
50
50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg/ directory
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
63
63
64
64
def _getfsnow(vfs):
    """Return the filesystem's idea of "now" as an mtime timestamp.

    A temporary file is created through ``vfs`` and its fresh mtime is
    read back, so the value reflects the filesystem clock rather than
    the system clock.
    """
    fd, name = vfs.mkstemp()
    try:
        return os.fstat(fd)[stat.ST_MTIME]
    finally:
        os.close(fd)
        vfs.unlink(name)
73
73
74
74
def requires_parents_change(func):
    """Decorator: ``func`` may only run inside a parentchange context."""

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator: ``func`` must NOT run inside a parentchange context."""

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state differs from what is on disk.
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of nested parentchange() contexts, see pendingparentchange()
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        # parents as they were before the current round of changes, if any
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
138
138
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # merely reading the property is enough to populate its cache
        self._pl
145
145
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
162
162
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        # non-zero while inside one or more parentchange() contexts
        return self._parentwriters > 0
168
168
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # Assigning to self._map here replaces the propertycache descriptor
        # for this instance, so later accesses hit the attribute directly.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
180
180
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
193
193
    @repocache(b'branch')
    def _branch(self):
        """Current branch name, as read from ``.hg/branch``.

        Falls back to b"default" when the file is missing or empty.
        """
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            # a missing branch file simply means the default branch
            if inst.errno != errno.ENOENT:
                raise
            return b"default"
202
202
    @property
    def _pl(self):
        # the (p1, p2) working-directory parents, straight from the map
        return self._map.parents()
206
206
    def hasdir(self, d):
        """True if directory ``d`` contains tracked files (per the map)."""
        return self._map.hastrackeddir(d)
209
209
    @rootcache(b'.hgignore')
    def _ignore(self):
        """Matcher for ignored files, built from the configured ignore files."""
        files = self._ignorefiles()
        if not files:
            # no ignore files configured: nothing is ignored
            return matchmod.never()

        # combine every ignore file through include: patterns
        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218
218
    @propertycache
    def _slash(self):
        # whether to display paths with '/' on platforms whose separator isn't
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222
222
    @propertycache
    def _checklink(self):
        # whether the filesystem at the repo root supports symlinks
        return util.checklink(self._root)
226
226
    @propertycache
    def _checkexec(self):
        # whether the filesystem at the repo root honours the exec bit
        return bool(util.checkexec(self._root))
230
230
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems, probed via the '.hg' directory
        return not util.fscasesensitive(self._join(b'.hg'))
234
234
    def _join(self, f):
        """Return the absolute path of ``f`` inside the working directory."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
239
239
240 def flagfunc(self, buildfallback):
240 def flagfunc(self, buildfallback):
241 if self._checklink and self._checkexec:
241 if self._checklink and self._checkexec:
242
242
243 def f(x):
243 def f(x):
244 try:
244 try:
245 st = os.lstat(self._join(x))
245 st = os.lstat(self._join(x))
246 if util.statislink(st):
246 if util.statislink(st):
247 return b'l'
247 return b'l'
248 if util.statisexec(st):
248 if util.statisexec(st):
249 return b'x'
249 return b'x'
250 except OSError:
250 except OSError:
251 pass
251 pass
252 return b''
252 return b''
253
253
254 return f
254 return f
255
255
256 fallback = buildfallback()
256 fallback = buildfallback()
257 if self._checklink:
257 if self._checklink:
258
258
259 def f(x):
259 def f(x):
260 if os.path.islink(self._join(x)):
260 if os.path.islink(self._join(x)):
261 return b'l'
261 return b'l'
262 if b'x' in fallback(x):
262 if b'x' in fallback(x):
263 return b'x'
263 return b'x'
264 return b''
264 return b''
265
265
266 return f
266 return f
267 if self._checkexec:
267 if self._checkexec:
268
268
269 def f(x):
269 def f(x):
270 if b'l' in fallback(x):
270 if b'l' in fallback(x):
271 return b'l'
271 return b'l'
272 if util.isexec(self._join(x)):
272 if util.isexec(self._join(x)):
273 return b'x'
273 return b'x'
274 return b''
274 return b''
275
275
276 return f
276 return f
277 else:
277 else:
278 return fallback
278 return fallback
279
279
    @propertycache
    def _cwd(self):
        """Current working directory, honouring the ui.forcecwd override."""
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
287
287
288 def getcwd(self):
288 def getcwd(self):
289 """Return the path from which a canonical path is calculated.
289 """Return the path from which a canonical path is calculated.
290
290
291 This path should be used to resolve file patterns or to convert
291 This path should be used to resolve file patterns or to convert
292 canonical paths back to file paths for display. It shouldn't be
292 canonical paths back to file paths for display. It shouldn't be
293 used to get real file paths. Use vfs functions instead.
293 used to get real file paths. Use vfs functions instead.
294 """
294 """
295 cwd = self._cwd
295 cwd = self._cwd
296 if cwd == self._root:
296 if cwd == self._root:
297 return b''
297 return b''
298 # self._root ends with a path separator if self._root is '/' or 'C:\'
298 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 rootsep = self._root
299 rootsep = self._root
300 if not util.endswithsep(rootsep):
300 if not util.endswithsep(rootsep):
301 rootsep += pycompat.ossep
301 rootsep += pycompat.ossep
302 if cwd.startswith(rootsep):
302 if cwd.startswith(rootsep):
303 return cwd[len(rootsep) :]
303 return cwd[len(rootsep) :]
304 else:
304 else:
305 # we're outside the repo. return an absolute path.
305 # we're outside the repo. return an absolute path.
306 return cwd
306 return cwd
307
307
308 def pathto(self, f, cwd=None):
308 def pathto(self, f, cwd=None):
309 if cwd is None:
309 if cwd is None:
310 cwd = self.getcwd()
310 cwd = self.getcwd()
311 path = util.pathto(self._root, cwd, f)
311 path = util.pathto(self._root, cwd, f)
312 if self._slash:
312 if self._slash:
313 return util.pconvert(path)
313 return util.pconvert(path)
314 return path
314 return path
315
315
316 def __getitem__(self, key):
316 def __getitem__(self, key):
317 """Return the current state of key (a filename) in the dirstate.
317 """Return the current state of key (a filename) in the dirstate.
318
318
319 States are:
319 States are:
320 n normal
320 n normal
321 m needs merging
321 m needs merging
322 r marked for removal
322 r marked for removal
323 a marked for addition
323 a marked for addition
324 ? not tracked
324 ? not tracked
325
325
326 XXX The "state" is a bit obscure to be in the "public" API. we should
326 XXX The "state" is a bit obscure to be in the "public" API. we should
327 consider migrating all user of this to going through the dirstate entry
327 consider migrating all user of this to going through the dirstate entry
328 instead.
328 instead.
329 """
329 """
330 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
331 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
330 entry = self._map.get(key)
332 entry = self._map.get(key)
331 if entry is not None:
333 if entry is not None:
332 return entry.state
334 return entry.state
333 return b'?'
335 return b'?'
334
336
335 def get_entry(self, path):
337 def get_entry(self, path):
336 """return a DirstateItem for the associated path"""
338 """return a DirstateItem for the associated path"""
337 entry = self._map.get(path)
339 entry = self._map.get(path)
338 if entry is None:
340 if entry is None:
339 return DirstateItem()
341 return DirstateItem()
340 return entry
342 return entry
341
343
    def __contains__(self, key):
        """True if key (a filename) has an entry in the dirstate."""
        return key in self._map
344
346
    def __iter__(self):
        # iterate filenames in sorted order for deterministic results
        return iter(sorted(self._map))
347
349
    def items(self):
        """Iterate over (filename, entry) pairs of the dirstate map."""
        return pycompat.iteritems(self._map)

    # Python 2 style alias, kept for compatibility with existing callers.
    iteritems = items
352
354
    def parents(self):
        """Return both working-directory parents, validated."""
        return [self._validate(p) for p in self._pl]
355
357
    def p1(self):
        # first (validated) working-directory parent
        return self._validate(self._pl[0])
358
360
    def p2(self):
        # second (validated) working-directory parent
        return self._validate(self._pl[1])
361
363
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # a non-null second parent is what defines the merge state
        return self._pl[1] != self._nodeconstants.nullid
366
368
    def branch(self):
        """Current branch name, converted to the local encoding."""
        return encoding.tolocal(self._branch)
369
371
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the parents as they were before the first change
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
395
397
    def setbranch(self, branch):
        """Persist ``branch`` to .hg/branch, written atomically."""
        # update the cached property first, then write its value out
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # abandon the temporary file so a partial write is never visible
            f.discard()
            raise
411
413
412 def invalidate(self):
414 def invalidate(self):
413 """Causes the next access to reread the dirstate.
415 """Causes the next access to reread the dirstate.
414
416
415 This is different from localrepo.invalidatedirstate() because it always
417 This is different from localrepo.invalidatedirstate() because it always
416 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
418 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
417 check whether the dirstate has changed before rereading it."""
419 check whether the dirstate has changed before rereading it."""
418
420
419 for a in ("_map", "_branch", "_ignore"):
421 for a in ("_map", "_branch", "_ignore"):
420 if a in self.__dict__:
422 if a in self.__dict__:
421 delattr(self, a)
423 delattr(self, a)
422 self._lastnormaltime = 0
424 self._lastnormaltime = 0
423 self._dirty = False
425 self._dirty = False
424 self._parentwriters = 0
426 self._parentwriters = 0
425 self._origpl = None
427 self._origpl = None
426
428
427 def copy(self, source, dest):
429 def copy(self, source, dest):
428 """Mark dest as a copy of source. Unmark dest if source is None."""
430 """Mark dest as a copy of source. Unmark dest if source is None."""
429 if source == dest:
431 if source == dest:
430 return
432 return
431 self._dirty = True
433 self._dirty = True
432 if source is not None:
434 if source is not None:
433 self._map.copymap[dest] = source
435 self._map.copymap[dest] = source
434 else:
436 else:
435 self._map.copymap.pop(dest, None)
437 self._map.copymap.pop(dest, None)
436
438
    def copied(self, file):
        """Return the copy source recorded for ``file``, or None."""
        return self._map.copymap.get(file, None)
439
441
    def copies(self):
        # the raw copy map: {destination: source}
        return self._map.copymap
442
444
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            # run the checks for a newly tracked file name
            self._check_new_tracked_filename(filename)
        return self._map.set_tracked(filename)
457
459
    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True the file was previously tracked, False otherwise.
        """
        ret = self._map.set_untracked(filename)
        if ret:
            # only mark the dirstate dirty when something actually changed
            self._dirty = True
        return ret
471
473
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean

        ``parentfiledata``, when provided, is a (mode, size, mtime) tuple;
        otherwise the data is read from the filesystem.
        """
        self._dirty = True
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            (mode, size, mtime) = self._get_filedata(filename)
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        self._map.set_clean(filename, mode, size, mtime)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
488
490
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._map.set_possibly_dirty(filename)
494
496
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent after
        a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            # tracked in p1 only: nothing special to prepare
            pass
        else:
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
557
559
558 @requires_parents_change
560 @requires_parents_change
559 def update_file(
561 def update_file(
560 self,
562 self,
561 filename,
563 filename,
562 wc_tracked,
564 wc_tracked,
563 p1_tracked,
565 p1_tracked,
564 p2_tracked=False,
566 p2_tracked=False,
565 merged=False,
567 merged=False,
566 clean_p1=False,
568 clean_p1=False,
567 clean_p2=False,
569 clean_p2=False,
568 possibly_dirty=False,
570 possibly_dirty=False,
569 parentfiledata=None,
571 parentfiledata=None,
570 ):
572 ):
571 """update the information about a file in the dirstate
573 """update the information about a file in the dirstate
572
574
573 This is to be called when the direstates parent changes to keep track
575 This is to be called when the direstates parent changes to keep track
574 of what is the file situation in regards to the working copy and its parent.
576 of what is the file situation in regards to the working copy and its parent.
575
577
576 This function must be called within a `dirstate.parentchange` context.
578 This function must be called within a `dirstate.parentchange` context.
577
579
578 note: the API is at an early stage and we might need to adjust it
580 note: the API is at an early stage and we might need to adjust it
579 depending of what information ends up being relevant and useful to
581 depending of what information ends up being relevant and useful to
580 other processing.
582 other processing.
581 """
583 """
582 if merged and (clean_p1 or clean_p2):
584 if merged and (clean_p1 or clean_p2):
583 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
585 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
584 raise error.ProgrammingError(msg)
586 raise error.ProgrammingError(msg)
585
587
586 # note: I do not think we need to double check name clash here since we
588 # note: I do not think we need to double check name clash here since we
587 # are in a update/merge case that should already have taken care of
589 # are in a update/merge case that should already have taken care of
588 # this. The test agrees
590 # this. The test agrees
589
591
590 self._dirty = True
592 self._dirty = True
591
593
592 need_parent_file_data = (
594 need_parent_file_data = (
593 not (possibly_dirty or clean_p2 or merged)
595 not (possibly_dirty or clean_p2 or merged)
594 and wc_tracked
596 and wc_tracked
595 and p1_tracked
597 and p1_tracked
596 )
598 )
597
599
598 # this mean we are doing call for file we do not really care about the
600 # this mean we are doing call for file we do not really care about the
599 # data (eg: added or removed), however this should be a minor overhead
601 # data (eg: added or removed), however this should be a minor overhead
600 # compared to the overall update process calling this.
602 # compared to the overall update process calling this.
601 if need_parent_file_data:
603 if need_parent_file_data:
602 if parentfiledata is None:
604 if parentfiledata is None:
603 parentfiledata = self._get_filedata(filename)
605 parentfiledata = self._get_filedata(filename)
604 mtime = parentfiledata[2]
606 mtime = parentfiledata[2]
605
607
606 if mtime > self._lastnormaltime:
608 if mtime > self._lastnormaltime:
607 # Remember the most recent modification timeslot for
609 # Remember the most recent modification timeslot for
608 # status(), to make sure we won't miss future
610 # status(), to make sure we won't miss future
609 # size-preserving file content modifications that happen
611 # size-preserving file content modifications that happen
610 # within the same timeslot.
612 # within the same timeslot.
611 self._lastnormaltime = mtime
613 self._lastnormaltime = mtime
612
614
613 self._map.reset_state(
615 self._map.reset_state(
614 filename,
616 filename,
615 wc_tracked,
617 wc_tracked,
616 p1_tracked,
618 p1_tracked,
617 p2_tracked=p2_tracked,
619 p2_tracked=p2_tracked,
618 merged=merged,
620 merged=merged,
619 clean_p1=clean_p1,
621 clean_p1=clean_p1,
620 clean_p2=clean_p2,
622 clean_p2=clean_p2,
621 possibly_dirty=possibly_dirty,
623 possibly_dirty=possibly_dirty,
622 parentfiledata=parentfiledata,
624 parentfiledata=parentfiledata,
623 )
625 )
624 if (
626 if (
625 parentfiledata is not None
627 parentfiledata is not None
626 and parentfiledata[2] > self._lastnormaltime
628 and parentfiledata[2] > self._lastnormaltime
627 ):
629 ):
628 # Remember the most recent modification timeslot for status(),
630 # Remember the most recent modification timeslot for status(),
629 # to make sure we won't miss future size-preserving file content
631 # to make sure we won't miss future size-preserving file content
630 # modifications that happen within the same timeslot.
632 # modifications that happen within the same timeslot.
631 self._lastnormaltime = parentfiledata[2]
633 self._lastnormaltime = parentfiledata[2]
632
634
633 def _check_new_tracked_filename(self, filename):
635 def _check_new_tracked_filename(self, filename):
634 scmutil.checkfilename(filename)
636 scmutil.checkfilename(filename)
635 if self._map.hastrackeddir(filename):
637 if self._map.hastrackeddir(filename):
636 msg = _(b'directory %r already in dirstate')
638 msg = _(b'directory %r already in dirstate')
637 msg %= pycompat.bytestr(filename)
639 msg %= pycompat.bytestr(filename)
638 raise error.Abort(msg)
640 raise error.Abort(msg)
639 # shadows
641 # shadows
640 for d in pathutil.finddirs(filename):
642 for d in pathutil.finddirs(filename):
641 if self._map.hastrackeddir(d):
643 if self._map.hastrackeddir(d):
642 break
644 break
643 entry = self._map.get(d)
645 entry = self._map.get(d)
644 if entry is not None and not entry.removed:
646 if entry is not None and not entry.removed:
645 msg = _(b'file %r in dirstate clashes with %r')
647 msg = _(b'file %r in dirstate clashes with %r')
646 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
648 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
647 raise error.Abort(msg)
649 raise error.Abort(msg)
648
650
649 def _get_filedata(self, filename):
651 def _get_filedata(self, filename):
650 """returns"""
652 """returns"""
651 s = os.lstat(self._join(filename))
653 s = os.lstat(self._join(filename))
652 mode = s.st_mode
654 mode = s.st_mode
653 size = s.st_size
655 size = s.st_size
654 mtime = s[stat.ST_MTIME]
656 mtime = s[stat.ST_MTIME]
655 return (mode, size, mtime)
657 return (mode, size, mtime)
656
658
657 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
659 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
658 if exists is None:
660 if exists is None:
659 exists = os.path.lexists(os.path.join(self._root, path))
661 exists = os.path.lexists(os.path.join(self._root, path))
660 if not exists:
662 if not exists:
661 # Maybe a path component exists
663 # Maybe a path component exists
662 if not ignoremissing and b'/' in path:
664 if not ignoremissing and b'/' in path:
663 d, f = path.rsplit(b'/', 1)
665 d, f = path.rsplit(b'/', 1)
664 d = self._normalize(d, False, ignoremissing, None)
666 d = self._normalize(d, False, ignoremissing, None)
665 folded = d + b"/" + f
667 folded = d + b"/" + f
666 else:
668 else:
667 # No path components, preserve original case
669 # No path components, preserve original case
668 folded = path
670 folded = path
669 else:
671 else:
670 # recursively normalize leading directory components
672 # recursively normalize leading directory components
671 # against dirstate
673 # against dirstate
672 if b'/' in normed:
674 if b'/' in normed:
673 d, f = normed.rsplit(b'/', 1)
675 d, f = normed.rsplit(b'/', 1)
674 d = self._normalize(d, False, ignoremissing, True)
676 d = self._normalize(d, False, ignoremissing, True)
675 r = self._root + b"/" + d
677 r = self._root + b"/" + d
676 folded = d + b"/" + util.fspath(f, r)
678 folded = d + b"/" + util.fspath(f, r)
677 else:
679 else:
678 folded = util.fspath(normed, self._root)
680 folded = util.fspath(normed, self._root)
679 storemap[normed] = folded
681 storemap[normed] = folded
680
682
681 return folded
683 return folded
682
684
683 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
685 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
684 normed = util.normcase(path)
686 normed = util.normcase(path)
685 folded = self._map.filefoldmap.get(normed, None)
687 folded = self._map.filefoldmap.get(normed, None)
686 if folded is None:
688 if folded is None:
687 if isknown:
689 if isknown:
688 folded = path
690 folded = path
689 else:
691 else:
690 folded = self._discoverpath(
692 folded = self._discoverpath(
691 path, normed, ignoremissing, exists, self._map.filefoldmap
693 path, normed, ignoremissing, exists, self._map.filefoldmap
692 )
694 )
693 return folded
695 return folded
694
696
695 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
697 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
696 normed = util.normcase(path)
698 normed = util.normcase(path)
697 folded = self._map.filefoldmap.get(normed, None)
699 folded = self._map.filefoldmap.get(normed, None)
698 if folded is None:
700 if folded is None:
699 folded = self._map.dirfoldmap.get(normed, None)
701 folded = self._map.dirfoldmap.get(normed, None)
700 if folded is None:
702 if folded is None:
701 if isknown:
703 if isknown:
702 folded = path
704 folded = path
703 else:
705 else:
704 # store discovered result in dirfoldmap so that future
706 # store discovered result in dirfoldmap so that future
705 # normalizefile calls don't start matching directories
707 # normalizefile calls don't start matching directories
706 folded = self._discoverpath(
708 folded = self._discoverpath(
707 path, normed, ignoremissing, exists, self._map.dirfoldmap
709 path, normed, ignoremissing, exists, self._map.dirfoldmap
708 )
710 )
709 return folded
711 return folded
710
712
711 def normalize(self, path, isknown=False, ignoremissing=False):
713 def normalize(self, path, isknown=False, ignoremissing=False):
712 """
714 """
713 normalize the case of a pathname when on a casefolding filesystem
715 normalize the case of a pathname when on a casefolding filesystem
714
716
715 isknown specifies whether the filename came from walking the
717 isknown specifies whether the filename came from walking the
716 disk, to avoid extra filesystem access.
718 disk, to avoid extra filesystem access.
717
719
718 If ignoremissing is True, missing path are returned
720 If ignoremissing is True, missing path are returned
719 unchanged. Otherwise, we try harder to normalize possibly
721 unchanged. Otherwise, we try harder to normalize possibly
720 existing path components.
722 existing path components.
721
723
722 The normalized case is determined based on the following precedence:
724 The normalized case is determined based on the following precedence:
723
725
724 - version of name already stored in the dirstate
726 - version of name already stored in the dirstate
725 - version of name stored on disk
727 - version of name stored on disk
726 - version provided via command arguments
728 - version provided via command arguments
727 """
729 """
728
730
729 if self._checkcase:
731 if self._checkcase:
730 return self._normalize(path, isknown, ignoremissing)
732 return self._normalize(path, isknown, ignoremissing)
731 return path
733 return path
732
734
733 def clear(self):
735 def clear(self):
734 self._map.clear()
736 self._map.clear()
735 self._lastnormaltime = 0
737 self._lastnormaltime = 0
736 self._dirty = True
738 self._dirty = True
737
739
738 def rebuild(self, parent, allfiles, changedfiles=None):
740 def rebuild(self, parent, allfiles, changedfiles=None):
739 if changedfiles is None:
741 if changedfiles is None:
740 # Rebuild entire dirstate
742 # Rebuild entire dirstate
741 to_lookup = allfiles
743 to_lookup = allfiles
742 to_drop = []
744 to_drop = []
743 lastnormaltime = self._lastnormaltime
745 lastnormaltime = self._lastnormaltime
744 self.clear()
746 self.clear()
745 self._lastnormaltime = lastnormaltime
747 self._lastnormaltime = lastnormaltime
746 elif len(changedfiles) < 10:
748 elif len(changedfiles) < 10:
747 # Avoid turning allfiles into a set, which can be expensive if it's
749 # Avoid turning allfiles into a set, which can be expensive if it's
748 # large.
750 # large.
749 to_lookup = []
751 to_lookup = []
750 to_drop = []
752 to_drop = []
751 for f in changedfiles:
753 for f in changedfiles:
752 if f in allfiles:
754 if f in allfiles:
753 to_lookup.append(f)
755 to_lookup.append(f)
754 else:
756 else:
755 to_drop.append(f)
757 to_drop.append(f)
756 else:
758 else:
757 changedfilesset = set(changedfiles)
759 changedfilesset = set(changedfiles)
758 to_lookup = changedfilesset & set(allfiles)
760 to_lookup = changedfilesset & set(allfiles)
759 to_drop = changedfilesset - to_lookup
761 to_drop = changedfilesset - to_lookup
760
762
761 if self._origpl is None:
763 if self._origpl is None:
762 self._origpl = self._pl
764 self._origpl = self._pl
763 self._map.setparents(parent, self._nodeconstants.nullid)
765 self._map.setparents(parent, self._nodeconstants.nullid)
764
766
765 for f in to_lookup:
767 for f in to_lookup:
766
768
767 if self.in_merge:
769 if self.in_merge:
768 self.set_tracked(f)
770 self.set_tracked(f)
769 else:
771 else:
770 self._map.reset_state(
772 self._map.reset_state(
771 f,
773 f,
772 wc_tracked=True,
774 wc_tracked=True,
773 p1_tracked=True,
775 p1_tracked=True,
774 possibly_dirty=True,
776 possibly_dirty=True,
775 )
777 )
776 for f in to_drop:
778 for f in to_drop:
777 self._map.reset_state(f)
779 self._map.reset_state(f)
778
780
779 self._dirty = True
781 self._dirty = True
780
782
781 def identity(self):
783 def identity(self):
782 """Return identity of dirstate itself to detect changing in storage
784 """Return identity of dirstate itself to detect changing in storage
783
785
784 If identity of previous dirstate is equal to this, writing
786 If identity of previous dirstate is equal to this, writing
785 changes based on the former dirstate out can keep consistency.
787 changes based on the former dirstate out can keep consistency.
786 """
788 """
787 return self._map.identity
789 return self._map.identity
788
790
789 def write(self, tr):
791 def write(self, tr):
790 if not self._dirty:
792 if not self._dirty:
791 return
793 return
792
794
793 filename = self._filename
795 filename = self._filename
794 if tr:
796 if tr:
795 # 'dirstate.write()' is not only for writing in-memory
797 # 'dirstate.write()' is not only for writing in-memory
796 # changes out, but also for dropping ambiguous timestamp.
798 # changes out, but also for dropping ambiguous timestamp.
797 # delayed writing re-raise "ambiguous timestamp issue".
799 # delayed writing re-raise "ambiguous timestamp issue".
798 # See also the wiki page below for detail:
800 # See also the wiki page below for detail:
799 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
801 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
800
802
801 # record when mtime start to be ambiguous
803 # record when mtime start to be ambiguous
802 now = _getfsnow(self._opener)
804 now = _getfsnow(self._opener)
803
805
804 # delay writing in-memory changes out
806 # delay writing in-memory changes out
805 tr.addfilegenerator(
807 tr.addfilegenerator(
806 b'dirstate',
808 b'dirstate',
807 (self._filename,),
809 (self._filename,),
808 lambda f: self._writedirstate(tr, f, now=now),
810 lambda f: self._writedirstate(tr, f, now=now),
809 location=b'plain',
811 location=b'plain',
810 )
812 )
811 return
813 return
812
814
813 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
815 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
814 self._writedirstate(tr, st)
816 self._writedirstate(tr, st)
815
817
816 def addparentchangecallback(self, category, callback):
818 def addparentchangecallback(self, category, callback):
817 """add a callback to be called when the wd parents are changed
819 """add a callback to be called when the wd parents are changed
818
820
819 Callback will be called with the following arguments:
821 Callback will be called with the following arguments:
820 dirstate, (oldp1, oldp2), (newp1, newp2)
822 dirstate, (oldp1, oldp2), (newp1, newp2)
821
823
822 Category is a unique identifier to allow overwriting an old callback
824 Category is a unique identifier to allow overwriting an old callback
823 with a newer callback.
825 with a newer callback.
824 """
826 """
825 self._plchangecallbacks[category] = callback
827 self._plchangecallbacks[category] = callback
826
828
827 def _writedirstate(self, tr, st, now=None):
829 def _writedirstate(self, tr, st, now=None):
828 # notify callbacks about parents change
830 # notify callbacks about parents change
829 if self._origpl is not None and self._origpl != self._pl:
831 if self._origpl is not None and self._origpl != self._pl:
830 for c, callback in sorted(
832 for c, callback in sorted(
831 pycompat.iteritems(self._plchangecallbacks)
833 pycompat.iteritems(self._plchangecallbacks)
832 ):
834 ):
833 callback(self, self._origpl, self._pl)
835 callback(self, self._origpl, self._pl)
834 self._origpl = None
836 self._origpl = None
835
837
836 if now is None:
838 if now is None:
837 # use the modification time of the newly created temporary file as the
839 # use the modification time of the newly created temporary file as the
838 # filesystem's notion of 'now'
840 # filesystem's notion of 'now'
839 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
841 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
840
842
841 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
843 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
842 # timestamp of each entries in dirstate, because of 'now > mtime'
844 # timestamp of each entries in dirstate, because of 'now > mtime'
843 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
845 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
844 if delaywrite > 0:
846 if delaywrite > 0:
845 # do we have any files to delay for?
847 # do we have any files to delay for?
846 for f, e in pycompat.iteritems(self._map):
848 for f, e in pycompat.iteritems(self._map):
847 if e.need_delay(now):
849 if e.need_delay(now):
848 import time # to avoid useless import
850 import time # to avoid useless import
849
851
850 # rather than sleep n seconds, sleep until the next
852 # rather than sleep n seconds, sleep until the next
851 # multiple of n seconds
853 # multiple of n seconds
852 clock = time.time()
854 clock = time.time()
853 start = int(clock) - (int(clock) % delaywrite)
855 start = int(clock) - (int(clock) % delaywrite)
854 end = start + delaywrite
856 end = start + delaywrite
855 time.sleep(end - clock)
857 time.sleep(end - clock)
856 now = end # trust our estimate that the end is near now
858 now = end # trust our estimate that the end is near now
857 break
859 break
858
860
859 self._map.write(tr, st, now)
861 self._map.write(tr, st, now)
860 self._lastnormaltime = 0
862 self._lastnormaltime = 0
861 self._dirty = False
863 self._dirty = False
862
864
863 def _dirignore(self, f):
865 def _dirignore(self, f):
864 if self._ignore(f):
866 if self._ignore(f):
865 return True
867 return True
866 for p in pathutil.finddirs(f):
868 for p in pathutil.finddirs(f):
867 if self._ignore(p):
869 if self._ignore(p):
868 return True
870 return True
869 return False
871 return False
870
872
871 def _ignorefiles(self):
873 def _ignorefiles(self):
872 files = []
874 files = []
873 if os.path.exists(self._join(b'.hgignore')):
875 if os.path.exists(self._join(b'.hgignore')):
874 files.append(self._join(b'.hgignore'))
876 files.append(self._join(b'.hgignore'))
875 for name, path in self._ui.configitems(b"ui"):
877 for name, path in self._ui.configitems(b"ui"):
876 if name == b'ignore' or name.startswith(b'ignore.'):
878 if name == b'ignore' or name.startswith(b'ignore.'):
877 # we need to use os.path.join here rather than self._join
879 # we need to use os.path.join here rather than self._join
878 # because path is arbitrary and user-specified
880 # because path is arbitrary and user-specified
879 files.append(os.path.join(self._rootdir, util.expandpath(path)))
881 files.append(os.path.join(self._rootdir, util.expandpath(path)))
880 return files
882 return files
881
883
882 def _ignorefileandline(self, f):
884 def _ignorefileandline(self, f):
883 files = collections.deque(self._ignorefiles())
885 files = collections.deque(self._ignorefiles())
884 visited = set()
886 visited = set()
885 while files:
887 while files:
886 i = files.popleft()
888 i = files.popleft()
887 patterns = matchmod.readpatternfile(
889 patterns = matchmod.readpatternfile(
888 i, self._ui.warn, sourceinfo=True
890 i, self._ui.warn, sourceinfo=True
889 )
891 )
890 for pattern, lineno, line in patterns:
892 for pattern, lineno, line in patterns:
891 kind, p = matchmod._patsplit(pattern, b'glob')
893 kind, p = matchmod._patsplit(pattern, b'glob')
892 if kind == b"subinclude":
894 if kind == b"subinclude":
893 if p not in visited:
895 if p not in visited:
894 files.append(p)
896 files.append(p)
895 continue
897 continue
896 m = matchmod.match(
898 m = matchmod.match(
897 self._root, b'', [], [pattern], warn=self._ui.warn
899 self._root, b'', [], [pattern], warn=self._ui.warn
898 )
900 )
899 if m(f):
901 if m(f):
900 return (i, lineno, line)
902 return (i, lineno, line)
901 visited.add(i)
903 visited.add(i)
902 return (None, -1, b"")
904 return (None, -1, b"")
903
905
904 def _walkexplicit(self, match, subrepos):
906 def _walkexplicit(self, match, subrepos):
905 """Get stat data about the files explicitly specified by match.
907 """Get stat data about the files explicitly specified by match.
906
908
907 Return a triple (results, dirsfound, dirsnotfound).
909 Return a triple (results, dirsfound, dirsnotfound).
908 - results is a mapping from filename to stat result. It also contains
910 - results is a mapping from filename to stat result. It also contains
909 listings mapping subrepos and .hg to None.
911 listings mapping subrepos and .hg to None.
910 - dirsfound is a list of files found to be directories.
912 - dirsfound is a list of files found to be directories.
911 - dirsnotfound is a list of files that the dirstate thinks are
913 - dirsnotfound is a list of files that the dirstate thinks are
912 directories and that were not found."""
914 directories and that were not found."""
913
915
914 def badtype(mode):
916 def badtype(mode):
915 kind = _(b'unknown')
917 kind = _(b'unknown')
916 if stat.S_ISCHR(mode):
918 if stat.S_ISCHR(mode):
917 kind = _(b'character device')
919 kind = _(b'character device')
918 elif stat.S_ISBLK(mode):
920 elif stat.S_ISBLK(mode):
919 kind = _(b'block device')
921 kind = _(b'block device')
920 elif stat.S_ISFIFO(mode):
922 elif stat.S_ISFIFO(mode):
921 kind = _(b'fifo')
923 kind = _(b'fifo')
922 elif stat.S_ISSOCK(mode):
924 elif stat.S_ISSOCK(mode):
923 kind = _(b'socket')
925 kind = _(b'socket')
924 elif stat.S_ISDIR(mode):
926 elif stat.S_ISDIR(mode):
925 kind = _(b'directory')
927 kind = _(b'directory')
926 return _(b'unsupported file type (type is %s)') % kind
928 return _(b'unsupported file type (type is %s)') % kind
927
929
928 badfn = match.bad
930 badfn = match.bad
929 dmap = self._map
931 dmap = self._map
930 lstat = os.lstat
932 lstat = os.lstat
931 getkind = stat.S_IFMT
933 getkind = stat.S_IFMT
932 dirkind = stat.S_IFDIR
934 dirkind = stat.S_IFDIR
933 regkind = stat.S_IFREG
935 regkind = stat.S_IFREG
934 lnkkind = stat.S_IFLNK
936 lnkkind = stat.S_IFLNK
935 join = self._join
937 join = self._join
936 dirsfound = []
938 dirsfound = []
937 foundadd = dirsfound.append
939 foundadd = dirsfound.append
938 dirsnotfound = []
940 dirsnotfound = []
939 notfoundadd = dirsnotfound.append
941 notfoundadd = dirsnotfound.append
940
942
941 if not match.isexact() and self._checkcase:
943 if not match.isexact() and self._checkcase:
942 normalize = self._normalize
944 normalize = self._normalize
943 else:
945 else:
944 normalize = None
946 normalize = None
945
947
946 files = sorted(match.files())
948 files = sorted(match.files())
947 subrepos.sort()
949 subrepos.sort()
948 i, j = 0, 0
950 i, j = 0, 0
949 while i < len(files) and j < len(subrepos):
951 while i < len(files) and j < len(subrepos):
950 subpath = subrepos[j] + b"/"
952 subpath = subrepos[j] + b"/"
951 if files[i] < subpath:
953 if files[i] < subpath:
952 i += 1
954 i += 1
953 continue
955 continue
954 while i < len(files) and files[i].startswith(subpath):
956 while i < len(files) and files[i].startswith(subpath):
955 del files[i]
957 del files[i]
956 j += 1
958 j += 1
957
959
958 if not files or b'' in files:
960 if not files or b'' in files:
959 files = [b'']
961 files = [b'']
960 # constructing the foldmap is expensive, so don't do it for the
962 # constructing the foldmap is expensive, so don't do it for the
961 # common case where files is ['']
963 # common case where files is ['']
962 normalize = None
964 normalize = None
963 results = dict.fromkeys(subrepos)
965 results = dict.fromkeys(subrepos)
964 results[b'.hg'] = None
966 results[b'.hg'] = None
965
967
966 for ff in files:
968 for ff in files:
967 if normalize:
969 if normalize:
968 nf = normalize(ff, False, True)
970 nf = normalize(ff, False, True)
969 else:
971 else:
970 nf = ff
972 nf = ff
971 if nf in results:
973 if nf in results:
972 continue
974 continue
973
975
974 try:
976 try:
975 st = lstat(join(nf))
977 st = lstat(join(nf))
976 kind = getkind(st.st_mode)
978 kind = getkind(st.st_mode)
977 if kind == dirkind:
979 if kind == dirkind:
978 if nf in dmap:
980 if nf in dmap:
979 # file replaced by dir on disk but still in dirstate
981 # file replaced by dir on disk but still in dirstate
980 results[nf] = None
982 results[nf] = None
981 foundadd((nf, ff))
983 foundadd((nf, ff))
982 elif kind == regkind or kind == lnkkind:
984 elif kind == regkind or kind == lnkkind:
983 results[nf] = st
985 results[nf] = st
984 else:
986 else:
985 badfn(ff, badtype(kind))
987 badfn(ff, badtype(kind))
986 if nf in dmap:
988 if nf in dmap:
987 results[nf] = None
989 results[nf] = None
988 except OSError as inst: # nf not found on disk - it is dirstate only
990 except OSError as inst: # nf not found on disk - it is dirstate only
989 if nf in dmap: # does it exactly match a missing file?
991 if nf in dmap: # does it exactly match a missing file?
990 results[nf] = None
992 results[nf] = None
991 else: # does it match a missing directory?
993 else: # does it match a missing directory?
992 if self._map.hasdir(nf):
994 if self._map.hasdir(nf):
993 notfoundadd(nf)
995 notfoundadd(nf)
994 else:
996 else:
995 badfn(ff, encoding.strtolocal(inst.strerror))
997 badfn(ff, encoding.strtolocal(inst.strerror))
996
998
997 # match.files() may contain explicitly-specified paths that shouldn't
999 # match.files() may contain explicitly-specified paths that shouldn't
998 # be taken; drop them from the list of files found. dirsfound/notfound
1000 # be taken; drop them from the list of files found. dirsfound/notfound
999 # aren't filtered here because they will be tested later.
1001 # aren't filtered here because they will be tested later.
1000 if match.anypats():
1002 if match.anypats():
1001 for f in list(results):
1003 for f in list(results):
1002 if f == b'.hg' or f in subrepos:
1004 if f == b'.hg' or f in subrepos:
1003 # keep sentinel to disable further out-of-repo walks
1005 # keep sentinel to disable further out-of-repo walks
1004 continue
1006 continue
1005 if not match(f):
1007 if not match(f):
1006 del results[f]
1008 del results[f]
1007
1009
1008 # Case insensitive filesystems cannot rely on lstat() failing to detect
1010 # Case insensitive filesystems cannot rely on lstat() failing to detect
1009 # a case-only rename. Prune the stat object for any file that does not
1011 # a case-only rename. Prune the stat object for any file that does not
1010 # match the case in the filesystem, if there are multiple files that
1012 # match the case in the filesystem, if there are multiple files that
1011 # normalize to the same path.
1013 # normalize to the same path.
1012 if match.isexact() and self._checkcase:
1014 if match.isexact() and self._checkcase:
1013 normed = {}
1015 normed = {}
1014
1016
1015 for f, st in pycompat.iteritems(results):
1017 for f, st in pycompat.iteritems(results):
1016 if st is None:
1018 if st is None:
1017 continue
1019 continue
1018
1020
1019 nc = util.normcase(f)
1021 nc = util.normcase(f)
1020 paths = normed.get(nc)
1022 paths = normed.get(nc)
1021
1023
1022 if paths is None:
1024 if paths is None:
1023 paths = set()
1025 paths = set()
1024 normed[nc] = paths
1026 normed[nc] = paths
1025
1027
1026 paths.add(f)
1028 paths.add(f)
1027
1029
1028 for norm, paths in pycompat.iteritems(normed):
1030 for norm, paths in pycompat.iteritems(normed):
1029 if len(paths) > 1:
1031 if len(paths) > 1:
1030 for path in paths:
1032 for path in paths:
1031 folded = self._discoverpath(
1033 folded = self._discoverpath(
1032 path, norm, True, None, self._map.dirfoldmap
1034 path, norm, True, None, self._map.dirfoldmap
1033 )
1035 )
1034 if path != folded:
1036 if path != folded:
1035 results[path] = None
1037 results[path] = None
1036
1038
1037 return results, dirsfound, dirsnotfound
1039 return results, dirsfound, dirsnotfound
1038
1040
1039 def walk(self, match, subrepos, unknown, ignored, full=True):
1041 def walk(self, match, subrepos, unknown, ignored, full=True):
1040 """
1042 """
1041 Walk recursively through the directory tree, finding all files
1043 Walk recursively through the directory tree, finding all files
1042 matched by match.
1044 matched by match.
1043
1045
1044 If full is False, maybe skip some known-clean files.
1046 If full is False, maybe skip some known-clean files.
1045
1047
1046 Return a dict mapping filename to stat-like object (either
1048 Return a dict mapping filename to stat-like object (either
1047 mercurial.osutil.stat instance or return value of os.stat()).
1049 mercurial.osutil.stat instance or return value of os.stat()).
1048
1050
1049 """
1051 """
1050 # full is a flag that extensions that hook into walk can use -- this
1052 # full is a flag that extensions that hook into walk can use -- this
1051 # implementation doesn't use it at all. This satisfies the contract
1053 # implementation doesn't use it at all. This satisfies the contract
1052 # because we only guarantee a "maybe".
1054 # because we only guarantee a "maybe".
1053
1055
1054 if ignored:
1056 if ignored:
1055 ignore = util.never
1057 ignore = util.never
1056 dirignore = util.never
1058 dirignore = util.never
1057 elif unknown:
1059 elif unknown:
1058 ignore = self._ignore
1060 ignore = self._ignore
1059 dirignore = self._dirignore
1061 dirignore = self._dirignore
1060 else:
1062 else:
1061 # if not unknown and not ignored, drop dir recursion and step 2
1063 # if not unknown and not ignored, drop dir recursion and step 2
1062 ignore = util.always
1064 ignore = util.always
1063 dirignore = util.always
1065 dirignore = util.always
1064
1066
1065 matchfn = match.matchfn
1067 matchfn = match.matchfn
1066 matchalways = match.always()
1068 matchalways = match.always()
1067 matchtdir = match.traversedir
1069 matchtdir = match.traversedir
1068 dmap = self._map
1070 dmap = self._map
1069 listdir = util.listdir
1071 listdir = util.listdir
1070 lstat = os.lstat
1072 lstat = os.lstat
1071 dirkind = stat.S_IFDIR
1073 dirkind = stat.S_IFDIR
1072 regkind = stat.S_IFREG
1074 regkind = stat.S_IFREG
1073 lnkkind = stat.S_IFLNK
1075 lnkkind = stat.S_IFLNK
1074 join = self._join
1076 join = self._join
1075
1077
1076 exact = skipstep3 = False
1078 exact = skipstep3 = False
1077 if match.isexact(): # match.exact
1079 if match.isexact(): # match.exact
1078 exact = True
1080 exact = True
1079 dirignore = util.always # skip step 2
1081 dirignore = util.always # skip step 2
1080 elif match.prefix(): # match.match, no patterns
1082 elif match.prefix(): # match.match, no patterns
1081 skipstep3 = True
1083 skipstep3 = True
1082
1084
1083 if not exact and self._checkcase:
1085 if not exact and self._checkcase:
1084 normalize = self._normalize
1086 normalize = self._normalize
1085 normalizefile = self._normalizefile
1087 normalizefile = self._normalizefile
1086 skipstep3 = False
1088 skipstep3 = False
1087 else:
1089 else:
1088 normalize = self._normalize
1090 normalize = self._normalize
1089 normalizefile = None
1091 normalizefile = None
1090
1092
1091 # step 1: find all explicit files
1093 # step 1: find all explicit files
1092 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1094 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1093 if matchtdir:
1095 if matchtdir:
1094 for d in work:
1096 for d in work:
1095 matchtdir(d[0])
1097 matchtdir(d[0])
1096 for d in dirsnotfound:
1098 for d in dirsnotfound:
1097 matchtdir(d)
1099 matchtdir(d)
1098
1100
1099 skipstep3 = skipstep3 and not (work or dirsnotfound)
1101 skipstep3 = skipstep3 and not (work or dirsnotfound)
1100 work = [d for d in work if not dirignore(d[0])]
1102 work = [d for d in work if not dirignore(d[0])]
1101
1103
1102 # step 2: visit subdirectories
1104 # step 2: visit subdirectories
1103 def traverse(work, alreadynormed):
1105 def traverse(work, alreadynormed):
1104 wadd = work.append
1106 wadd = work.append
1105 while work:
1107 while work:
1106 tracing.counter('dirstate.walk work', len(work))
1108 tracing.counter('dirstate.walk work', len(work))
1107 nd = work.pop()
1109 nd = work.pop()
1108 visitentries = match.visitchildrenset(nd)
1110 visitentries = match.visitchildrenset(nd)
1109 if not visitentries:
1111 if not visitentries:
1110 continue
1112 continue
1111 if visitentries == b'this' or visitentries == b'all':
1113 if visitentries == b'this' or visitentries == b'all':
1112 visitentries = None
1114 visitentries = None
1113 skip = None
1115 skip = None
1114 if nd != b'':
1116 if nd != b'':
1115 skip = b'.hg'
1117 skip = b'.hg'
1116 try:
1118 try:
1117 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1119 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1118 entries = listdir(join(nd), stat=True, skip=skip)
1120 entries = listdir(join(nd), stat=True, skip=skip)
1119 except OSError as inst:
1121 except OSError as inst:
1120 if inst.errno in (errno.EACCES, errno.ENOENT):
1122 if inst.errno in (errno.EACCES, errno.ENOENT):
1121 match.bad(
1123 match.bad(
1122 self.pathto(nd), encoding.strtolocal(inst.strerror)
1124 self.pathto(nd), encoding.strtolocal(inst.strerror)
1123 )
1125 )
1124 continue
1126 continue
1125 raise
1127 raise
1126 for f, kind, st in entries:
1128 for f, kind, st in entries:
1127 # Some matchers may return files in the visitentries set,
1129 # Some matchers may return files in the visitentries set,
1128 # instead of 'this', if the matcher explicitly mentions them
1130 # instead of 'this', if the matcher explicitly mentions them
1129 # and is not an exactmatcher. This is acceptable; we do not
1131 # and is not an exactmatcher. This is acceptable; we do not
1130 # make any hard assumptions about file-or-directory below
1132 # make any hard assumptions about file-or-directory below
1131 # based on the presence of `f` in visitentries. If
1133 # based on the presence of `f` in visitentries. If
1132 # visitchildrenset returned a set, we can always skip the
1134 # visitchildrenset returned a set, we can always skip the
1133 # entries *not* in the set it provided regardless of whether
1135 # entries *not* in the set it provided regardless of whether
1134 # they're actually a file or a directory.
1136 # they're actually a file or a directory.
1135 if visitentries and f not in visitentries:
1137 if visitentries and f not in visitentries:
1136 continue
1138 continue
1137 if normalizefile:
1139 if normalizefile:
1138 # even though f might be a directory, we're only
1140 # even though f might be a directory, we're only
1139 # interested in comparing it to files currently in the
1141 # interested in comparing it to files currently in the
1140 # dmap -- therefore normalizefile is enough
1142 # dmap -- therefore normalizefile is enough
1141 nf = normalizefile(
1143 nf = normalizefile(
1142 nd and (nd + b"/" + f) or f, True, True
1144 nd and (nd + b"/" + f) or f, True, True
1143 )
1145 )
1144 else:
1146 else:
1145 nf = nd and (nd + b"/" + f) or f
1147 nf = nd and (nd + b"/" + f) or f
1146 if nf not in results:
1148 if nf not in results:
1147 if kind == dirkind:
1149 if kind == dirkind:
1148 if not ignore(nf):
1150 if not ignore(nf):
1149 if matchtdir:
1151 if matchtdir:
1150 matchtdir(nf)
1152 matchtdir(nf)
1151 wadd(nf)
1153 wadd(nf)
1152 if nf in dmap and (matchalways or matchfn(nf)):
1154 if nf in dmap and (matchalways or matchfn(nf)):
1153 results[nf] = None
1155 results[nf] = None
1154 elif kind == regkind or kind == lnkkind:
1156 elif kind == regkind or kind == lnkkind:
1155 if nf in dmap:
1157 if nf in dmap:
1156 if matchalways or matchfn(nf):
1158 if matchalways or matchfn(nf):
1157 results[nf] = st
1159 results[nf] = st
1158 elif (matchalways or matchfn(nf)) and not ignore(
1160 elif (matchalways or matchfn(nf)) and not ignore(
1159 nf
1161 nf
1160 ):
1162 ):
1161 # unknown file -- normalize if necessary
1163 # unknown file -- normalize if necessary
1162 if not alreadynormed:
1164 if not alreadynormed:
1163 nf = normalize(nf, False, True)
1165 nf = normalize(nf, False, True)
1164 results[nf] = st
1166 results[nf] = st
1165 elif nf in dmap and (matchalways or matchfn(nf)):
1167 elif nf in dmap and (matchalways or matchfn(nf)):
1166 results[nf] = None
1168 results[nf] = None
1167
1169
1168 for nd, d in work:
1170 for nd, d in work:
1169 # alreadynormed means that processwork doesn't have to do any
1171 # alreadynormed means that processwork doesn't have to do any
1170 # expensive directory normalization
1172 # expensive directory normalization
1171 alreadynormed = not normalize or nd == d
1173 alreadynormed = not normalize or nd == d
1172 traverse([d], alreadynormed)
1174 traverse([d], alreadynormed)
1173
1175
1174 for s in subrepos:
1176 for s in subrepos:
1175 del results[s]
1177 del results[s]
1176 del results[b'.hg']
1178 del results[b'.hg']
1177
1179
1178 # step 3: visit remaining files from dmap
1180 # step 3: visit remaining files from dmap
1179 if not skipstep3 and not exact:
1181 if not skipstep3 and not exact:
1180 # If a dmap file is not in results yet, it was either
1182 # If a dmap file is not in results yet, it was either
1181 # a) not matching matchfn b) ignored, c) missing, or d) under a
1183 # a) not matching matchfn b) ignored, c) missing, or d) under a
1182 # symlink directory.
1184 # symlink directory.
1183 if not results and matchalways:
1185 if not results and matchalways:
1184 visit = [f for f in dmap]
1186 visit = [f for f in dmap]
1185 else:
1187 else:
1186 visit = [f for f in dmap if f not in results and matchfn(f)]
1188 visit = [f for f in dmap if f not in results and matchfn(f)]
1187 visit.sort()
1189 visit.sort()
1188
1190
1189 if unknown:
1191 if unknown:
1190 # unknown == True means we walked all dirs under the roots
1192 # unknown == True means we walked all dirs under the roots
1191 # that wasn't ignored, and everything that matched was stat'ed
1193 # that wasn't ignored, and everything that matched was stat'ed
1192 # and is already in results.
1194 # and is already in results.
1193 # The rest must thus be ignored or under a symlink.
1195 # The rest must thus be ignored or under a symlink.
1194 audit_path = pathutil.pathauditor(self._root, cached=True)
1196 audit_path = pathutil.pathauditor(self._root, cached=True)
1195
1197
1196 for nf in iter(visit):
1198 for nf in iter(visit):
1197 # If a stat for the same file was already added with a
1199 # If a stat for the same file was already added with a
1198 # different case, don't add one for this, since that would
1200 # different case, don't add one for this, since that would
1199 # make it appear as if the file exists under both names
1201 # make it appear as if the file exists under both names
1200 # on disk.
1202 # on disk.
1201 if (
1203 if (
1202 normalizefile
1204 normalizefile
1203 and normalizefile(nf, True, True) in results
1205 and normalizefile(nf, True, True) in results
1204 ):
1206 ):
1205 results[nf] = None
1207 results[nf] = None
1206 # Report ignored items in the dmap as long as they are not
1208 # Report ignored items in the dmap as long as they are not
1207 # under a symlink directory.
1209 # under a symlink directory.
1208 elif audit_path.check(nf):
1210 elif audit_path.check(nf):
1209 try:
1211 try:
1210 results[nf] = lstat(join(nf))
1212 results[nf] = lstat(join(nf))
1211 # file was just ignored, no links, and exists
1213 # file was just ignored, no links, and exists
1212 except OSError:
1214 except OSError:
1213 # file doesn't exist
1215 # file doesn't exist
1214 results[nf] = None
1216 results[nf] = None
1215 else:
1217 else:
1216 # It's either missing or under a symlink directory
1218 # It's either missing or under a symlink directory
1217 # which we in this case report as missing
1219 # which we in this case report as missing
1218 results[nf] = None
1220 results[nf] = None
1219 else:
1221 else:
1220 # We may not have walked the full directory tree above,
1222 # We may not have walked the full directory tree above,
1221 # so stat and check everything we missed.
1223 # so stat and check everything we missed.
1222 iv = iter(visit)
1224 iv = iter(visit)
1223 for st in util.statfiles([join(i) for i in visit]):
1225 for st in util.statfiles([join(i) for i in visit]):
1224 results[next(iv)] = st
1226 results[next(iv)] = st
1225 return results
1227 return results
1226
1228
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status via the Rust fast path and return ``(lookup, status)``.

        ``matcher`` selects which files are considered; the three ``list_*``
        flags control whether clean/ignored/unknown files are reported.  The
        caller is expected to handle ``rustmod.FallbackError`` propagating out
        of ``rustmod.status`` when the Rust side cannot service the request.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            # Pin Rayon to a single thread when workers are disabled.
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        # The Rust call returns a fixed-position tuple; the unpack order below
        # must match the rustmod.status implementation exactly.
        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # Propagate any dirstate mutation the Rust side reported so it gets
        # written back out later.
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        # NOTE(review): ``self._ui.warn`` is a bound method and therefore
        # always truthy, so this guard never skips the loop — presumably
        # intended as a cheap "ui supports warnings" check; confirm.
        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file, syntax) pair: an ignore file with a bad syntax
                    # declaration.
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: an ignore/pattern file that could not be read
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        # Report bad files (e.g. unreadable explicit paths) through the
        # matcher's bad() callback, translating the message encoding.
        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1305
1307
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

        unsure:
          files that might have been modified since the dirstate was
          written, but need to be read to be sure (size is the same
          but mtime differs)
        status.modified:
          files that have definitely been modified since the dirstate
          was written (different size or mode)
        status.clean:
          files that have definitely not been modified since the
          dirstate was written
        """
        # Rename the boolean flags before reusing their names as result lists.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # Decide whether the Rust fast path can be used for this request.
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # Rust declined the request; fall through to the pure-Python
                # implementation below.
                pass

        # No-op sink used for result categories the caller did not ask for.
        def noop(f):
            pass

        # Hoist bound methods to locals: this loop is a hot path and local
        # lookups are significantly cheaper than attribute lookups.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # File on disk but not in the dirstate: ignored or unknown.
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                # tracked but missing from disk -> deleted
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                # Tracked and present on disk: compare recorded metadata with
                # the stat result to classify as modified/unsure/clean.
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    # same size but mtime differs -> needs a content read
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1446
1448
1447 def matches(self, match):
1449 def matches(self, match):
1448 """
1450 """
1449 return files in the dirstate (in whatever state) filtered by match
1451 return files in the dirstate (in whatever state) filtered by match
1450 """
1452 """
1451 dmap = self._map
1453 dmap = self._map
1452 if rustmod is not None:
1454 if rustmod is not None:
1453 dmap = self._map._rustmap
1455 dmap = self._map._rustmap
1454
1456
1455 if match.always():
1457 if match.always():
1456 return dmap.keys()
1458 return dmap.keys()
1457 files = match.files()
1459 files = match.files()
1458 if match.isexact():
1460 if match.isexact():
1459 # fast path -- filter the other way around, since typically files is
1461 # fast path -- filter the other way around, since typically files is
1460 # much smaller than dmap
1462 # much smaller than dmap
1461 return [f for f in files if f in dmap]
1463 return [f for f in files if f in dmap]
1462 if match.prefix() and all(fn in dmap for fn in files):
1464 if match.prefix() and all(fn in dmap for fn in files):
1463 # fast path -- all the values are known to be files, so just return
1465 # fast path -- all the values are known to be files, so just return
1464 # that
1466 # that
1465 return list(files)
1467 return list(files)
1466 return [f for f in dmap if match(f)]
1468 return [f for f in dmap if match(f)]
1467
1469
1468 def _actualfilename(self, tr):
1470 def _actualfilename(self, tr):
1469 if tr:
1471 if tr:
1470 return self._pendingfilename
1472 return self._pendingfilename
1471 else:
1473 else:
1472 return self._filename
1474 return self._filename
1473
1475
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        # ``tr`` (possibly None) selects between the pending and the regular
        # dirstate file; ``backupname`` is the destination, which must differ.
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # Remove any stale backup before creating the new one.
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1512
1514
1513 def restorebackup(self, tr, backupname):
1515 def restorebackup(self, tr, backupname):
1514 '''Restore dirstate by backup file'''
1516 '''Restore dirstate by backup file'''
1515 # this "invalidate()" prevents "wlock.release()" from writing
1517 # this "invalidate()" prevents "wlock.release()" from writing
1516 # changes of dirstate out after restoring from backup file
1518 # changes of dirstate out after restoring from backup file
1517 self.invalidate()
1519 self.invalidate()
1518 filename = self._actualfilename(tr)
1520 filename = self._actualfilename(tr)
1519 o = self._opener
1521 o = self._opener
1520 if util.samefile(o.join(backupname), o.join(filename)):
1522 if util.samefile(o.join(backupname), o.join(filename)):
1521 o.unlink(backupname)
1523 o.unlink(backupname)
1522 else:
1524 else:
1523 o.rename(backupname, filename, checkambig=True)
1525 o.rename(backupname, filename, checkambig=True)
1524
1526
    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        # ``tr`` is accepted for interface symmetry with savebackup()/
        # restorebackup() but is not needed to delete the backup file.
        self._opener.unlink(backupname)
1528
1530
1529 def verify(self, m1, m2):
1531 def verify(self, m1, m2):
1530 """check the dirstate content again the parent manifest and yield errors"""
1532 """check the dirstate content again the parent manifest and yield errors"""
1531 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1533 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1532 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1534 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1533 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1535 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1534 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1536 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1535 for f, entry in self.items():
1537 for f, entry in self.items():
1536 state = entry.state
1538 state = entry.state
1537 if state in b"nr" and f not in m1:
1539 if state in b"nr" and f not in m1:
1538 yield (missing_from_p1, f, state)
1540 yield (missing_from_p1, f, state)
1539 if state in b"a" and f in m1:
1541 if state in b"a" and f in m1:
1540 yield (unexpected_in_p1, f, state)
1542 yield (unexpected_in_p1, f, state)
1541 if state in b"m" and f not in m1 and f not in m2:
1543 if state in b"m" and f not in m1 and f not in m2:
1542 yield (missing_from_ps, f, state)
1544 yield (missing_from_ps, f, state)
1543 for f in m1:
1545 for f in m1:
1544 state = self.get_entry(f).state
1546 state = self.get_entry(f).state
1545 if state not in b"nrm":
1547 if state not in b"nrm":
1546 yield (missing_from_ds, f, state)
1548 yield (missing_from_ds, f, state)
General Comments 0
You need to be logged in to leave comments. Login now