##// END OF EJS Templates
dirstate: group return logic and clarify each function in flagfunc...
Raphaël Gomès -
r49103:0d6a099b default
parent child Browse files
Show More
@@ -1,1528 +1,1531 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .dirstateutils import (
34 from .dirstateutils import (
35 timestamp,
35 timestamp,
36 )
36 )
37
37
38 from .interfaces import (
38 from .interfaces import (
39 dirstate as intdirstate,
39 dirstate as intdirstate,
40 util as interfaceutil,
40 util as interfaceutil,
41 )
41 )
42
42
43 parsers = policy.importmod('parsers')
43 parsers = policy.importmod('parsers')
44 rustmod = policy.importrust('dirstate')
44 rustmod = policy.importrust('dirstate')
45
45
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47
47
48 propertycache = util.propertycache
48 propertycache = util.propertycache
49 filecache = scmutil.filecache
49 filecache = scmutil.filecache
50 _rangemask = dirstatemap.rangemask
50 _rangemask = dirstatemap.rangemask
51
51
52 DirstateItem = dirstatemap.DirstateItem
52 DirstateItem = dirstatemap.DirstateItem
53
53
54
54
class repocache(filecache):
    """filecache variant for files that live under ``.hg/``."""

    def join(self, obj, fname):
        # resolve through the dirstate's opener, which is rooted at .hg/
        opener = obj._opener
        return opener.join(fname)
60
60
61
61
class rootcache(filecache):
    """filecache variant for files that live in the repository root."""

    def join(self, obj, fname):
        # _join prefixes fname with the working-directory root
        joined = obj._join(fname)
        return joined
67
67
68
68
def _getfsnow(vfs):
    """Return the current time as observed on the filesystem behind ``vfs``.

    A scratch file is created so that the reported value reflects the
    filesystem's own timestamp resolution; the file is always cleaned up.
    """
    fd, scratch = vfs.mkstemp()
    try:
        return timestamp.mtime_of(os.fstat(fd))
    finally:
        os.close(fd)
        vfs.unlink(scratch)
77
77
78
78
def requires_parents_change(func):
    """Decorator enforcing that ``func`` runs inside a parentchange context.

    Raises ``error.ProgrammingError`` when the dirstate is not currently in
    the middle of a parent change.
    """

    def inner(self, *args, **kwargs):
        if not self.pendingparentchange():
            raise error.ProgrammingError(
                'calling `%s` outside of a parentchange context'
                % func.__name__
            )
        return func(self, *args, **kwargs)

    return inner
88
88
89
89
def requires_no_parents_change(func):
    """Decorator enforcing that ``func`` runs OUTSIDE a parentchange context.

    Raises ``error.ProgrammingError`` when the dirstate is currently in the
    middle of a parent change.
    """

    def inner(self, *args, **kwargs):
        if self.pendingparentchange():
            raise error.ProgrammingError(
                'calling `%s` inside of a parentchange context'
                % func.__name__
            )
        return func(self, *args, **kwargs)

    return inner
99
99
100
100
101 @interfaceutil.implementer(intdirstate.idirstate)
101 @interfaceutil.implementer(intdirstate.idirstate)
102 class dirstate(object):
102 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = timestamp.zero()
        self._ui = ui
        self._filecache = {}
        # depth of nested parentchange() contexts (see pendingparentchange)
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
142
142
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching self._pl forces the dirstate map (and its parents) to load
        self._pl
149
149
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        # counter rather than boolean: these contexts may nest
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
166
166
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        # incremented/decremented by the parentchange() context manager
        return self._parentwriters > 0
172
172
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # the assignment replaces this propertycache entry, so the map is
        # only ever built once
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
184
184
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
197
197
    @repocache(b'branch')
    def _branch(self):
        """Name of the active branch, read from the '.hg/branch' file."""
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # a missing branch file means the default branch
            return b"default"
206
206
    @property
    def _pl(self):
        # the (p1, p2) working-directory parents, as stored in the map
        return self._map.parents()
210
210
    def hasdir(self, d):
        """Return whether `d` is a directory with tracked files (per the map)."""
        return self._map.hastrackeddir(d)
213
213
    @rootcache(b'.hgignore')
    def _ignore(self):
        """Matcher for ignored files, built from every configured ignore file."""
        files = self._ignorefiles()
        if not files:
            # nothing to ignore: a matcher that never matches
            return matchmod.never()

        # combine all ignore files into a single include-based matcher
        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
222
222
    @propertycache
    def _slash(self):
        # True when paths should be shown with '/' even though the OS
        # separator differs (controlled by the ui.slash config)
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
226
226
    @propertycache
    def _checklink(self):
        # whether the filesystem at the repository root supports symlinks
        return util.checklink(self._root)
230
230
    @propertycache
    def _checkexec(self):
        # whether the filesystem at the repository root supports the exec bit
        return bool(util.checkexec(self._root))
234
234
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed via the '.hg' directory)
        return not util.fscasesensitive(self._join(b'.hg'))
238
238
    def _join(self, f):
        """Return the absolute path of `f` inside the working directory."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
243
243
    def flagfunc(self, buildfallback):
        """Build a callable returning the flags of a file: b'l' (symlink),
        b'x' (executable) or b'' (neither).

        Which check is returned depends on what the platform's filesystem
        supports; `buildfallback` provides the more expensive mechanism used
        when the filesystem cannot answer directly.
        """
        # the fallback is only needed (and only built) when at least one of
        # the two capabilities is missing
        if not (self._checklink and self._checkexec):
            fallback = buildfallback()

        def check_both(x):
            """This platform supports symlinks and exec permissions"""
            try:
                st = os.lstat(self._join(x))
                if util.statislink(st):
                    return b'l'
                if util.statisexec(st):
                    return b'x'
            except OSError:
                # missing/unreadable file: report no flags
                pass
            return b''

        def check_link(x):
            """This platform only supports symlinks"""
            if os.path.islink(self._join(x)):
                return b'l'
            # exec bit cannot be read from disk: use the dirstate fallback
            # information when present, the expensive fallback otherwise
            entry = self.get_entry(x)
            if entry.has_fallback_exec:
                if entry.fallback_exec:
                    return b'x'
            elif b'x' in fallback(x):
                return b'x'
            return b''

        def check_exec(x):
            """This platform only supports exec permissions"""
            if b'l' in fallback(x):
                return b'l'
            entry = self.get_entry(x)
            if entry.has_fallback_symlink:
                if entry.fallback_symlink:
                    return b'l'
            if util.isexec(self._join(x)):
                return b'x'
            return b''

        def check_fallback(x):
            """This platform supports neither symlinks nor exec permissions, so
            check the fallback in the dirstate if it exists, otherwise figure it
            out the more expensive way from the parents."""
            entry = self.get_entry(x)
            if entry.has_fallback_symlink:
                if entry.fallback_symlink:
                    return b'l'
            if entry.has_fallback_exec:
                if entry.fallback_exec:
                    return b'x'
            elif entry.has_fallback_symlink:
                # symlink fallback explicitly says "not a symlink" and no
                # exec information is recorded: no flags
                return b''
            return fallback(x)

        if self._checklink and self._checkexec:
            return check_both
        elif self._checklink:
            return check_link
        elif self._checkexec:
            return check_exec
        else:
            return check_fallback
304
307
    @propertycache
    def _cwd(self):
        """The current working directory, possibly forced via configuration."""
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
312
315
    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            # at the repo root: paths are already relative to it
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # inside the repo: strip the root prefix
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
332
335
    def pathto(self, f, cwd=None):
        """Return `f` as a path relative to `cwd` (defaults to getcwd())."""
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            # normalize to forward slashes for display
            return util.pconvert(path)
        return path
340
343
    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition
        ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. we should
        consider migrating all user of this to going through the dirstate entry
        instead.
        """
        # deprecated accessor: warn callers towards get_entry()
        msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
        util.nouideprecwarn(msg, b'6.1', stacklevel=2)
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        return b'?'
361
364
    def get_entry(self, path):
        """return a DirstateItem for the associated path"""
        entry = self._map.get(path)
        if entry is None:
            # unknown paths yield an empty item rather than None
            return DirstateItem()
        return entry
368
371
    def __contains__(self, key):
        """Return whether `key` has an entry in the dirstate map."""
        return key in self._map
371
374
    def __iter__(self):
        """Iterate over the filenames in the dirstate, in sorted order."""
        return iter(sorted(self._map))
374
377
    def items(self):
        """Iterate over (filename, entry) pairs of the dirstate map."""
        return pycompat.iteritems(self._map)

    # py2-era alias kept for backward compatibility
    iteritems = items
379
382
    def parents(self):
        """Return the (validated) parents of the working directory."""
        return [self._validate(p) for p in self._pl]
382
385
    def p1(self):
        """Return the (validated) first parent of the working directory."""
        return self._validate(self._pl[0])
385
388
    def p2(self):
        """Return the (validated) second parent of the working directory."""
        return self._validate(self._pl[1])
388
391
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # a non-null second parent means we are merging
        return self._pl[1] != self._nodeconstants.nullid
393
396
    def branch(self):
        """Return the current branch name, converted to the local encoding."""
        return encoding.tolocal(self._branch)
396
399
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for later comparison
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
422
425
    def setbranch(self, branch):
        """Set the current branch and persist it to the 'branch' file."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # discard the partially-written temp file before re-raising
            f.discard()
            raise
438
441
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so they are recomputed on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = timestamp.zero()
        self._dirty = False
        self._parentwriters = 0
        self._origpl = None
453
456
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            # self-copy is a no-op
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
        else:
            self._map.copymap.pop(dest, None)
463
466
    def copied(self, file):
        """Return the copy source of `file`, or None if it is not a copy."""
        return self._map.copymap.get(file, None)
466
469
    def copies(self):
        """Return the {destination: source} copy map."""
        return self._map.copymap
469
472
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            # newly-tracked file: validate the name before recording it
            self._check_new_tracked_filename(filename)
        return self._map.set_tracked(filename)
484
487
    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True the file was previously tracked, False otherwise.
        """
        ret = self._map.set_untracked(filename)
        if ret:
            # only mark the dirstate dirty if something actually changed
            self._dirty = True
        return ret
498
501
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        if parentfiledata:
            # caller already stat'ed the file: reuse its (mode, size, mtime)
            (mode, size, mtime) = parentfiledata
        else:
            (mode, size, mtime) = self._get_filedata(filename)
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        self._map.set_clean(filename, mode, size, mtime)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
515
518
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        # remember that the dirstate will need to be written out
        self._dirty = True
        self._map.set_possibly_dirty(filename)
521
524
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        parentfiledata = None
        if wc_tracked and p1_tracked:
            # file exists both in working copy and parent: record its stat data
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
577
@requires_parents_change
def update_file(
    self,
    filename,
    wc_tracked,
    p1_tracked,
    p2_info=False,
    possibly_dirty=False,
    parentfiledata=None,
):
    """update the information about a file in the dirstate

    This is to be called when the direstates parent changes to keep track
    of what is the file situation in regards to the working copy and its
    parent.

    This function must be called within a `dirstate.parentchange` context.

    note: the API is at an early stage and we might need to adjust it
    depending of what information ends up being relevant and useful to
    other processing.
    """

    # note: I do not think we need to double check name clash here since we
    # are in a update/merge case that should already have taken care of
    # this. The test agrees

    self._dirty = True

    # stat data is only meaningful when the file is clean in both the
    # working copy and p1, and is not involved with p2
    need_parent_file_data = (
        not possibly_dirty and not p2_info and wc_tracked and p1_tracked
    )

    if need_parent_file_data and parentfiledata is None:
        parentfiledata = self._get_filedata(filename)

    self._map.reset_state(
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=p2_info,
        has_meaningful_mtime=not possibly_dirty,
        parentfiledata=parentfiledata,
    )
    # parentfiledata is (mode, size, mtime); index 2 is the mtime
    if (
        parentfiledata is not None
        and parentfiledata[2] > self._lastnormaltime
    ):
        # Remember the most recent modification timeslot for status(),
        # to make sure we won't miss future size-preserving file content
        # modifications that happen within the same timeslot.
        self._lastnormaltime = parentfiledata[2]
626
629
def _check_new_tracked_filename(self, filename):
    """Validate `filename` before it starts being tracked.

    Aborts when the name itself is invalid, when a tracked directory
    already carries that name, or when a non-removed tracked file
    shadows one of the new file's parent directories.
    """
    scmutil.checkfilename(filename)
    if self._map.hastrackeddir(filename):
        msg = _(b'directory %r already in dirstate')
        msg %= pycompat.bytestr(filename)
        raise error.Abort(msg)
    # shadows
    for parent in pathutil.finddirs(filename):
        if self._map.hastrackeddir(parent):
            break
        parent_entry = self._map.get(parent)
        if parent_entry is None or parent_entry.removed:
            continue
        msg = _(b'file %r in dirstate clashes with %r')
        msg %= (pycompat.bytestr(parent), pycompat.bytestr(filename))
        raise error.Abort(msg)
642
645
def _get_filedata(self, filename):
    """Return a (mode, size, mtime) triple from an lstat of `filename`.

    `filename` is joined onto the repository root via `self._join`;
    `mtime` is wrapped with `timestamp.mtime_of` so it is suitable for
    storage in the dirstate.
    """
    # original docstring was the placeholder """returns""" — spell out
    # the actual contract instead
    st = os.lstat(self._join(filename))
    mode = st.st_mode
    size = st.st_size
    mtime = timestamp.mtime_of(st)
    return (mode, size, mtime)
650
653
def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
    """case-normalize `path` by probing the filesystem

    `normed` is the case-folded form of `path`; `storemap` caches the
    folding discovered for paths that exist on disk, so later lookups
    are cheap. `exists` may be passed by the caller to skip the lexists
    probe.
    """
    if exists is None:
        exists = os.path.lexists(os.path.join(self._root, path))
    if not exists:
        # Maybe a path component exists
        if not ignoremissing and b'/' in path:
            d, f = path.rsplit(b'/', 1)
            d = self._normalize(d, False, ignoremissing, None)
            folded = d + b"/" + f
        else:
            # No path components, preserve original case
            folded = path
    else:
        # recursively normalize leading directory components
        # against dirstate
        if b'/' in normed:
            d, f = normed.rsplit(b'/', 1)
            d = self._normalize(d, False, ignoremissing, True)
            r = self._root + b"/" + d
            folded = d + b"/" + util.fspath(f, r)
        else:
            folded = util.fspath(normed, self._root)
        # only cache results for paths that actually exist on disk
        storemap[normed] = folded

    return folded
676
679
def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
    """case-normalize a file path against the dirstate's file fold map

    Cached foldings win; otherwise known paths are returned as-is and
    unknown ones are discovered on disk (populating the file fold map).
    """
    normed = util.normcase(path)
    cached = self._map.filefoldmap.get(normed, None)
    if cached is not None:
        return cached
    if isknown:
        return path
    return self._discoverpath(
        path, normed, ignoremissing, exists, self._map.filefoldmap
    )
688
691
def _normalize(self, path, isknown, ignoremissing=False, exists=None):
    """case-normalize a path against both file and directory fold maps"""
    normed = util.normcase(path)
    cached = self._map.filefoldmap.get(normed, None)
    if cached is None:
        cached = self._map.dirfoldmap.get(normed, None)
    if cached is not None:
        return cached
    if isknown:
        return path
    # store discovered result in dirfoldmap so that future
    # normalizefile calls don't start matching directories
    return self._discoverpath(
        path, normed, ignoremissing, exists, self._map.dirfoldmap
    )
704
707
def normalize(self, path, isknown=False, ignoremissing=False):
    """
    normalize the case of a pathname when on a casefolding filesystem

    isknown specifies whether the filename came from walking the
    disk, to avoid extra filesystem access.

    If ignoremissing is True, missing path are returned
    unchanged. Otherwise, we try harder to normalize possibly
    existing path components.

    The normalized case is determined based on the following precedence:

    - version of name already stored in the dirstate
    - version of name stored on disk
    - version provided via command arguments
    """
    # case-sensitive filesystems need no folding at all
    if not self._checkcase:
        return path
    return self._normalize(path, isknown, ignoremissing)
726
729
def clear(self):
    """drop all dirstate content and mark the dirstate as modified"""
    self._dirty = True
    self._lastnormaltime = timestamp.zero()
    self._map.clear()
731
734
def rebuild(self, parent, allfiles, changedfiles=None):
    """rebuild dirstate entries so the working copy is parented on `parent`

    `allfiles` lists the files present in `parent` (only used for
    membership tests when `changedfiles` is given). When `changedfiles`
    is None the whole dirstate is cleared and rebuilt; otherwise only
    the listed files are adjusted.
    """
    if changedfiles is None:
        # Rebuild entire dirstate
        to_lookup = allfiles
        to_drop = []
        # clear() resets _lastnormaltime; preserve it across the rebuild
        lastnormaltime = self._lastnormaltime
        self.clear()
        self._lastnormaltime = lastnormaltime
    elif len(changedfiles) < 10:
        # Avoid turning allfiles into a set, which can be expensive if it's
        # large.
        to_lookup = []
        to_drop = []
        for f in changedfiles:
            if f in allfiles:
                to_lookup.append(f)
            else:
                to_drop.append(f)
    else:
        changedfilesset = set(changedfiles)
        to_lookup = changedfilesset & set(allfiles)
        to_drop = changedfilesset - to_lookup

    if self._origpl is None:
        self._origpl = self._pl
    self._map.setparents(parent, self._nodeconstants.nullid)

    for f in to_lookup:

        if self.in_merge:
            self.set_tracked(f)
        else:
            self._map.reset_state(
                f,
                wc_tracked=True,
                p1_tracked=True,
            )
    for f in to_drop:
        self._map.reset_state(f)

    self._dirty = True
773
776
def identity(self):
    """Return identity of dirstate itself to detect changing in storage

    If identity of previous dirstate is equal to this, writing
    changes based on the former dirstate out can keep consistency.
    """
    storage_id = self._map.identity
    return storage_id
781
784
def write(self, tr):
    """write in-memory dirstate changes out to disk

    With a transaction `tr`, the actual write is delayed through a file
    generator; without one, the dirstate file is written immediately.
    No-op when nothing is dirty.
    """
    if not self._dirty:
        return

    filename = self._filename
    if tr:
        # 'dirstate.write()' is not only for writing in-memory
        # changes out, but also for dropping ambiguous timestamp.
        # delayed writing re-raise "ambiguous timestamp issue".
        # See also the wiki page below for detail:
        # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

        # record when mtime start to be ambiguous
        now = _getfsnow(self._opener)

        # delay writing in-memory changes out
        tr.addfilegenerator(
            b'dirstate',
            (self._filename,),
            lambda f: self._writedirstate(tr, f, now=now),
            location=b'plain',
        )
        return

    # no transaction: write atomically right away, guarding against
    # ambiguous mtimes on the dirstate file itself (checkambig)
    st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
    self._writedirstate(tr, st)
808
811
def addparentchangecallback(self, category, callback):
    """add a callback to be called when the wd parents are changed

    Callback will be called with the following arguments:
    dirstate, (oldp1, oldp2), (newp1, newp2)

    Category is a unique identifier to allow overwriting an old callback
    with a newer callback.
    """
    # last registration per category wins
    self._plchangecallbacks[category] = callback
819
822
def _writedirstate(self, tr, st, now=None):
    """serialize the dirstate map into the open file object `st`

    `now` is the filesystem's notion of the current time, used to spot
    ambiguous mtimes; when None it is derived from `st`'s own mtime.
    Also fires registered parent-change callbacks when the parents moved.
    """
    # notify callbacks about parents change
    if self._origpl is not None and self._origpl != self._pl:
        for c, callback in sorted(
            pycompat.iteritems(self._plchangecallbacks)
        ):
            callback(self, self._origpl, self._pl)
        self._origpl = None

    if now is None:
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = timestamp.mtime_of(util.fstat(st))

    # enough 'delaywrite' prevents 'pack_dirstate' from dropping
    # timestamp of each entries in dirstate, because of 'now > mtime'
    delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
    if delaywrite > 0:
        # do we have any files to delay for?
        for f, e in pycompat.iteritems(self._map):
            if e.need_delay(now):
                import time  # to avoid useless import

                # rather than sleep n seconds, sleep until the next
                # multiple of n seconds
                clock = time.time()
                start = int(clock) - (int(clock) % delaywrite)
                end = start + delaywrite
                time.sleep(end - clock)
                # trust our estimate that the end is near now
                now = timestamp.timestamp((end, 0))
                break

    self._map.write(tr, st, now)
    self._lastnormaltime = timestamp.zero()
    self._dirty = False
856
859
857 def _dirignore(self, f):
860 def _dirignore(self, f):
858 if self._ignore(f):
861 if self._ignore(f):
859 return True
862 return True
860 for p in pathutil.finddirs(f):
863 for p in pathutil.finddirs(f):
861 if self._ignore(p):
864 if self._ignore(p):
862 return True
865 return True
863 return False
866 return False
864
867
865 def _ignorefiles(self):
868 def _ignorefiles(self):
866 files = []
869 files = []
867 if os.path.exists(self._join(b'.hgignore')):
870 if os.path.exists(self._join(b'.hgignore')):
868 files.append(self._join(b'.hgignore'))
871 files.append(self._join(b'.hgignore'))
869 for name, path in self._ui.configitems(b"ui"):
872 for name, path in self._ui.configitems(b"ui"):
870 if name == b'ignore' or name.startswith(b'ignore.'):
873 if name == b'ignore' or name.startswith(b'ignore.'):
871 # we need to use os.path.join here rather than self._join
874 # we need to use os.path.join here rather than self._join
872 # because path is arbitrary and user-specified
875 # because path is arbitrary and user-specified
873 files.append(os.path.join(self._rootdir, util.expandpath(path)))
876 files.append(os.path.join(self._rootdir, util.expandpath(path)))
874 return files
877 return files
875
878
def _ignorefileandline(self, f):
    """return (ignorefile, lineno, line) for the first rule ignoring `f`

    Walks every ignore file (following `subinclude` directives, each
    file visited at most once) and returns the origin of the first
    matching pattern; (None, -1, b"") when no rule matches.
    """
    files = collections.deque(self._ignorefiles())
    visited = set()
    while files:
        i = files.popleft()
        patterns = matchmod.readpatternfile(
            i, self._ui.warn, sourceinfo=True
        )
        for pattern, lineno, line in patterns:
            kind, p = matchmod._patsplit(pattern, b'glob')
            if kind == b"subinclude":
                # queue the sub-included file rather than matching the
                # directive itself
                if p not in visited:
                    files.append(p)
                continue
            m = matchmod.match(
                self._root, b'', [], [pattern], warn=self._ui.warn
            )
            if m(f):
                return (i, lineno, line)
        visited.add(i)
    return (None, -1, b"")
897
900
def _walkexplicit(self, match, subrepos):
    """Get stat data about the files explicitly specified by match.

    Return a triple (results, dirsfound, dirsnotfound).
    - results is a mapping from filename to stat result. It also contains
      listings mapping subrepos and .hg to None.
    - dirsfound is a list of files found to be directories.
    - dirsnotfound is a list of files that the dirstate thinks are
      directories and that were not found."""

    def badtype(mode):
        # build a human-readable description for an unsupported file type
        kind = _(b'unknown')
        if stat.S_ISCHR(mode):
            kind = _(b'character device')
        elif stat.S_ISBLK(mode):
            kind = _(b'block device')
        elif stat.S_ISFIFO(mode):
            kind = _(b'fifo')
        elif stat.S_ISSOCK(mode):
            kind = _(b'socket')
        elif stat.S_ISDIR(mode):
            kind = _(b'directory')
        return _(b'unsupported file type (type is %s)') % kind

    # hoist frequently-used lookups into locals for the loops below
    badfn = match.bad
    dmap = self._map
    lstat = os.lstat
    getkind = stat.S_IFMT
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self._join
    dirsfound = []
    foundadd = dirsfound.append
    dirsnotfound = []
    notfoundadd = dirsnotfound.append

    if not match.isexact() and self._checkcase:
        normalize = self._normalize
    else:
        normalize = None

    # drop paths that live inside a subrepo; both lists are sorted so a
    # single merge-style pass suffices
    files = sorted(match.files())
    subrepos.sort()
    i, j = 0, 0
    while i < len(files) and j < len(subrepos):
        subpath = subrepos[j] + b"/"
        if files[i] < subpath:
            i += 1
            continue
        while i < len(files) and files[i].startswith(subpath):
            del files[i]
        j += 1

    if not files or b'' in files:
        files = [b'']
        # constructing the foldmap is expensive, so don't do it for the
        # common case where files is ['']
        normalize = None
    results = dict.fromkeys(subrepos)
    results[b'.hg'] = None

    for ff in files:
        if normalize:
            nf = normalize(ff, False, True)
        else:
            nf = ff
        if nf in results:
            continue

        try:
            st = lstat(join(nf))
            kind = getkind(st.st_mode)
            if kind == dirkind:
                if nf in dmap:
                    # file replaced by dir on disk but still in dirstate
                    results[nf] = None
                foundadd((nf, ff))
            elif kind == regkind or kind == lnkkind:
                results[nf] = st
            else:
                badfn(ff, badtype(kind))
                if nf in dmap:
                    results[nf] = None
        except OSError as inst:  # nf not found on disk - it is dirstate only
            if nf in dmap:  # does it exactly match a missing file?
                results[nf] = None
            else:  # does it match a missing directory?
                if self._map.hasdir(nf):
                    notfoundadd(nf)
                else:
                    badfn(ff, encoding.strtolocal(inst.strerror))

    # match.files() may contain explicitly-specified paths that shouldn't
    # be taken; drop them from the list of files found. dirsfound/notfound
    # aren't filtered here because they will be tested later.
    if match.anypats():
        for f in list(results):
            if f == b'.hg' or f in subrepos:
                # keep sentinel to disable further out-of-repo walks
                continue
            if not match(f):
                del results[f]

    # Case insensitive filesystems cannot rely on lstat() failing to detect
    # a case-only rename. Prune the stat object for any file that does not
    # match the case in the filesystem, if there are multiple files that
    # normalize to the same path.
    if match.isexact() and self._checkcase:
        normed = {}

        # group result paths by their case-folded form
        for f, st in pycompat.iteritems(results):
            if st is None:
                continue

            nc = util.normcase(f)
            paths = normed.get(nc)

            if paths is None:
                paths = set()
                normed[nc] = paths

            paths.add(f)

        for norm, paths in pycompat.iteritems(normed):
            if len(paths) > 1:
                for path in paths:
                    folded = self._discoverpath(
                        path, norm, True, None, self._map.dirfoldmap
                    )
                    if path != folded:
                        results[path] = None

    return results, dirsfound, dirsnotfound
1032
1035
1033 def walk(self, match, subrepos, unknown, ignored, full=True):
1036 def walk(self, match, subrepos, unknown, ignored, full=True):
1034 """
1037 """
1035 Walk recursively through the directory tree, finding all files
1038 Walk recursively through the directory tree, finding all files
1036 matched by match.
1039 matched by match.
1037
1040
1038 If full is False, maybe skip some known-clean files.
1041 If full is False, maybe skip some known-clean files.
1039
1042
1040 Return a dict mapping filename to stat-like object (either
1043 Return a dict mapping filename to stat-like object (either
1041 mercurial.osutil.stat instance or return value of os.stat()).
1044 mercurial.osutil.stat instance or return value of os.stat()).
1042
1045
1043 """
1046 """
1044 # full is a flag that extensions that hook into walk can use -- this
1047 # full is a flag that extensions that hook into walk can use -- this
1045 # implementation doesn't use it at all. This satisfies the contract
1048 # implementation doesn't use it at all. This satisfies the contract
1046 # because we only guarantee a "maybe".
1049 # because we only guarantee a "maybe".
1047
1050
1048 if ignored:
1051 if ignored:
1049 ignore = util.never
1052 ignore = util.never
1050 dirignore = util.never
1053 dirignore = util.never
1051 elif unknown:
1054 elif unknown:
1052 ignore = self._ignore
1055 ignore = self._ignore
1053 dirignore = self._dirignore
1056 dirignore = self._dirignore
1054 else:
1057 else:
1055 # if not unknown and not ignored, drop dir recursion and step 2
1058 # if not unknown and not ignored, drop dir recursion and step 2
1056 ignore = util.always
1059 ignore = util.always
1057 dirignore = util.always
1060 dirignore = util.always
1058
1061
1059 matchfn = match.matchfn
1062 matchfn = match.matchfn
1060 matchalways = match.always()
1063 matchalways = match.always()
1061 matchtdir = match.traversedir
1064 matchtdir = match.traversedir
1062 dmap = self._map
1065 dmap = self._map
1063 listdir = util.listdir
1066 listdir = util.listdir
1064 lstat = os.lstat
1067 lstat = os.lstat
1065 dirkind = stat.S_IFDIR
1068 dirkind = stat.S_IFDIR
1066 regkind = stat.S_IFREG
1069 regkind = stat.S_IFREG
1067 lnkkind = stat.S_IFLNK
1070 lnkkind = stat.S_IFLNK
1068 join = self._join
1071 join = self._join
1069
1072
1070 exact = skipstep3 = False
1073 exact = skipstep3 = False
1071 if match.isexact(): # match.exact
1074 if match.isexact(): # match.exact
1072 exact = True
1075 exact = True
1073 dirignore = util.always # skip step 2
1076 dirignore = util.always # skip step 2
1074 elif match.prefix(): # match.match, no patterns
1077 elif match.prefix(): # match.match, no patterns
1075 skipstep3 = True
1078 skipstep3 = True
1076
1079
1077 if not exact and self._checkcase:
1080 if not exact and self._checkcase:
1078 normalize = self._normalize
1081 normalize = self._normalize
1079 normalizefile = self._normalizefile
1082 normalizefile = self._normalizefile
1080 skipstep3 = False
1083 skipstep3 = False
1081 else:
1084 else:
1082 normalize = self._normalize
1085 normalize = self._normalize
1083 normalizefile = None
1086 normalizefile = None
1084
1087
1085 # step 1: find all explicit files
1088 # step 1: find all explicit files
1086 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1089 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1087 if matchtdir:
1090 if matchtdir:
1088 for d in work:
1091 for d in work:
1089 matchtdir(d[0])
1092 matchtdir(d[0])
1090 for d in dirsnotfound:
1093 for d in dirsnotfound:
1091 matchtdir(d)
1094 matchtdir(d)
1092
1095
1093 skipstep3 = skipstep3 and not (work or dirsnotfound)
1096 skipstep3 = skipstep3 and not (work or dirsnotfound)
1094 work = [d for d in work if not dirignore(d[0])]
1097 work = [d for d in work if not dirignore(d[0])]
1095
1098
1096 # step 2: visit subdirectories
1099 # step 2: visit subdirectories
1097 def traverse(work, alreadynormed):
1100 def traverse(work, alreadynormed):
1098 wadd = work.append
1101 wadd = work.append
1099 while work:
1102 while work:
1100 tracing.counter('dirstate.walk work', len(work))
1103 tracing.counter('dirstate.walk work', len(work))
1101 nd = work.pop()
1104 nd = work.pop()
1102 visitentries = match.visitchildrenset(nd)
1105 visitentries = match.visitchildrenset(nd)
1103 if not visitentries:
1106 if not visitentries:
1104 continue
1107 continue
1105 if visitentries == b'this' or visitentries == b'all':
1108 if visitentries == b'this' or visitentries == b'all':
1106 visitentries = None
1109 visitentries = None
1107 skip = None
1110 skip = None
1108 if nd != b'':
1111 if nd != b'':
1109 skip = b'.hg'
1112 skip = b'.hg'
1110 try:
1113 try:
1111 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1114 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1112 entries = listdir(join(nd), stat=True, skip=skip)
1115 entries = listdir(join(nd), stat=True, skip=skip)
1113 except OSError as inst:
1116 except OSError as inst:
1114 if inst.errno in (errno.EACCES, errno.ENOENT):
1117 if inst.errno in (errno.EACCES, errno.ENOENT):
1115 match.bad(
1118 match.bad(
1116 self.pathto(nd), encoding.strtolocal(inst.strerror)
1119 self.pathto(nd), encoding.strtolocal(inst.strerror)
1117 )
1120 )
1118 continue
1121 continue
1119 raise
1122 raise
1120 for f, kind, st in entries:
1123 for f, kind, st in entries:
1121 # Some matchers may return files in the visitentries set,
1124 # Some matchers may return files in the visitentries set,
1122 # instead of 'this', if the matcher explicitly mentions them
1125 # instead of 'this', if the matcher explicitly mentions them
1123 # and is not an exactmatcher. This is acceptable; we do not
1126 # and is not an exactmatcher. This is acceptable; we do not
1124 # make any hard assumptions about file-or-directory below
1127 # make any hard assumptions about file-or-directory below
1125 # based on the presence of `f` in visitentries. If
1128 # based on the presence of `f` in visitentries. If
1126 # visitchildrenset returned a set, we can always skip the
1129 # visitchildrenset returned a set, we can always skip the
1127 # entries *not* in the set it provided regardless of whether
1130 # entries *not* in the set it provided regardless of whether
1128 # they're actually a file or a directory.
1131 # they're actually a file or a directory.
1129 if visitentries and f not in visitentries:
1132 if visitentries and f not in visitentries:
1130 continue
1133 continue
1131 if normalizefile:
1134 if normalizefile:
1132 # even though f might be a directory, we're only
1135 # even though f might be a directory, we're only
1133 # interested in comparing it to files currently in the
1136 # interested in comparing it to files currently in the
1134 # dmap -- therefore normalizefile is enough
1137 # dmap -- therefore normalizefile is enough
1135 nf = normalizefile(
1138 nf = normalizefile(
1136 nd and (nd + b"/" + f) or f, True, True
1139 nd and (nd + b"/" + f) or f, True, True
1137 )
1140 )
1138 else:
1141 else:
1139 nf = nd and (nd + b"/" + f) or f
1142 nf = nd and (nd + b"/" + f) or f
1140 if nf not in results:
1143 if nf not in results:
1141 if kind == dirkind:
1144 if kind == dirkind:
1142 if not ignore(nf):
1145 if not ignore(nf):
1143 if matchtdir:
1146 if matchtdir:
1144 matchtdir(nf)
1147 matchtdir(nf)
1145 wadd(nf)
1148 wadd(nf)
1146 if nf in dmap and (matchalways or matchfn(nf)):
1149 if nf in dmap and (matchalways or matchfn(nf)):
1147 results[nf] = None
1150 results[nf] = None
1148 elif kind == regkind or kind == lnkkind:
1151 elif kind == regkind or kind == lnkkind:
1149 if nf in dmap:
1152 if nf in dmap:
1150 if matchalways or matchfn(nf):
1153 if matchalways or matchfn(nf):
1151 results[nf] = st
1154 results[nf] = st
1152 elif (matchalways or matchfn(nf)) and not ignore(
1155 elif (matchalways or matchfn(nf)) and not ignore(
1153 nf
1156 nf
1154 ):
1157 ):
1155 # unknown file -- normalize if necessary
1158 # unknown file -- normalize if necessary
1156 if not alreadynormed:
1159 if not alreadynormed:
1157 nf = normalize(nf, False, True)
1160 nf = normalize(nf, False, True)
1158 results[nf] = st
1161 results[nf] = st
1159 elif nf in dmap and (matchalways or matchfn(nf)):
1162 elif nf in dmap and (matchalways or matchfn(nf)):
1160 results[nf] = None
1163 results[nf] = None
1161
1164
1162 for nd, d in work:
1165 for nd, d in work:
1163 # alreadynormed means that processwork doesn't have to do any
1166 # alreadynormed means that processwork doesn't have to do any
1164 # expensive directory normalization
1167 # expensive directory normalization
1165 alreadynormed = not normalize or nd == d
1168 alreadynormed = not normalize or nd == d
1166 traverse([d], alreadynormed)
1169 traverse([d], alreadynormed)
1167
1170
1168 for s in subrepos:
1171 for s in subrepos:
1169 del results[s]
1172 del results[s]
1170 del results[b'.hg']
1173 del results[b'.hg']
1171
1174
1172 # step 3: visit remaining files from dmap
1175 # step 3: visit remaining files from dmap
1173 if not skipstep3 and not exact:
1176 if not skipstep3 and not exact:
1174 # If a dmap file is not in results yet, it was either
1177 # If a dmap file is not in results yet, it was either
1175 # a) not matching matchfn b) ignored, c) missing, or d) under a
1178 # a) not matching matchfn b) ignored, c) missing, or d) under a
1176 # symlink directory.
1179 # symlink directory.
1177 if not results and matchalways:
1180 if not results and matchalways:
1178 visit = [f for f in dmap]
1181 visit = [f for f in dmap]
1179 else:
1182 else:
1180 visit = [f for f in dmap if f not in results and matchfn(f)]
1183 visit = [f for f in dmap if f not in results and matchfn(f)]
1181 visit.sort()
1184 visit.sort()
1182
1185
1183 if unknown:
1186 if unknown:
1184 # unknown == True means we walked all dirs under the roots
1187 # unknown == True means we walked all dirs under the roots
1185 # that wasn't ignored, and everything that matched was stat'ed
1188 # that wasn't ignored, and everything that matched was stat'ed
1186 # and is already in results.
1189 # and is already in results.
1187 # The rest must thus be ignored or under a symlink.
1190 # The rest must thus be ignored or under a symlink.
1188 audit_path = pathutil.pathauditor(self._root, cached=True)
1191 audit_path = pathutil.pathauditor(self._root, cached=True)
1189
1192
1190 for nf in iter(visit):
1193 for nf in iter(visit):
1191 # If a stat for the same file was already added with a
1194 # If a stat for the same file was already added with a
1192 # different case, don't add one for this, since that would
1195 # different case, don't add one for this, since that would
1193 # make it appear as if the file exists under both names
1196 # make it appear as if the file exists under both names
1194 # on disk.
1197 # on disk.
1195 if (
1198 if (
1196 normalizefile
1199 normalizefile
1197 and normalizefile(nf, True, True) in results
1200 and normalizefile(nf, True, True) in results
1198 ):
1201 ):
1199 results[nf] = None
1202 results[nf] = None
1200 # Report ignored items in the dmap as long as they are not
1203 # Report ignored items in the dmap as long as they are not
1201 # under a symlink directory.
1204 # under a symlink directory.
1202 elif audit_path.check(nf):
1205 elif audit_path.check(nf):
1203 try:
1206 try:
1204 results[nf] = lstat(join(nf))
1207 results[nf] = lstat(join(nf))
1205 # file was just ignored, no links, and exists
1208 # file was just ignored, no links, and exists
1206 except OSError:
1209 except OSError:
1207 # file doesn't exist
1210 # file doesn't exist
1208 results[nf] = None
1211 results[nf] = None
1209 else:
1212 else:
1210 # It's either missing or under a symlink directory
1213 # It's either missing or under a symlink directory
1211 # which we in this case report as missing
1214 # which we in this case report as missing
1212 results[nf] = None
1215 results[nf] = None
1213 else:
1216 else:
1214 # We may not have walked the full directory tree above,
1217 # We may not have walked the full directory tree above,
1215 # so stat and check everything we missed.
1218 # so stat and check everything we missed.
1216 iv = iter(visit)
1219 iv = iter(visit)
1217 for st in util.statfiles([join(i) for i in visit]):
1220 for st in util.statfiles([join(i) for i in visit]):
1218 results[next(iv)] = st
1221 results[next(iv)] = st
1219 return results
1222 return results
1220
1223
1221 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1224 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1222 # Force Rayon (Rust parallelism library) to respect the number of
1225 # Force Rayon (Rust parallelism library) to respect the number of
1223 # workers. This is a temporary workaround until Rust code knows
1226 # workers. This is a temporary workaround until Rust code knows
1224 # how to read the config file.
1227 # how to read the config file.
1225 numcpus = self._ui.configint(b"worker", b"numcpus")
1228 numcpus = self._ui.configint(b"worker", b"numcpus")
1226 if numcpus is not None:
1229 if numcpus is not None:
1227 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1230 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1228
1231
1229 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1232 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1230 if not workers_enabled:
1233 if not workers_enabled:
1231 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1234 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1232
1235
1233 (
1236 (
1234 lookup,
1237 lookup,
1235 modified,
1238 modified,
1236 added,
1239 added,
1237 removed,
1240 removed,
1238 deleted,
1241 deleted,
1239 clean,
1242 clean,
1240 ignored,
1243 ignored,
1241 unknown,
1244 unknown,
1242 warnings,
1245 warnings,
1243 bad,
1246 bad,
1244 traversed,
1247 traversed,
1245 dirty,
1248 dirty,
1246 ) = rustmod.status(
1249 ) = rustmod.status(
1247 self._map._map,
1250 self._map._map,
1248 matcher,
1251 matcher,
1249 self._rootdir,
1252 self._rootdir,
1250 self._ignorefiles(),
1253 self._ignorefiles(),
1251 self._checkexec,
1254 self._checkexec,
1252 self._lastnormaltime,
1255 self._lastnormaltime,
1253 bool(list_clean),
1256 bool(list_clean),
1254 bool(list_ignored),
1257 bool(list_ignored),
1255 bool(list_unknown),
1258 bool(list_unknown),
1256 bool(matcher.traversedir),
1259 bool(matcher.traversedir),
1257 )
1260 )
1258
1261
1259 self._dirty |= dirty
1262 self._dirty |= dirty
1260
1263
1261 if matcher.traversedir:
1264 if matcher.traversedir:
1262 for dir in traversed:
1265 for dir in traversed:
1263 matcher.traversedir(dir)
1266 matcher.traversedir(dir)
1264
1267
1265 if self._ui.warn:
1268 if self._ui.warn:
1266 for item in warnings:
1269 for item in warnings:
1267 if isinstance(item, tuple):
1270 if isinstance(item, tuple):
1268 file_path, syntax = item
1271 file_path, syntax = item
1269 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1272 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1270 file_path,
1273 file_path,
1271 syntax,
1274 syntax,
1272 )
1275 )
1273 self._ui.warn(msg)
1276 self._ui.warn(msg)
1274 else:
1277 else:
1275 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1278 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1276 self._ui.warn(
1279 self._ui.warn(
1277 msg
1280 msg
1278 % (
1281 % (
1279 pathutil.canonpath(
1282 pathutil.canonpath(
1280 self._rootdir, self._rootdir, item
1283 self._rootdir, self._rootdir, item
1281 ),
1284 ),
1282 b"No such file or directory",
1285 b"No such file or directory",
1283 )
1286 )
1284 )
1287 )
1285
1288
1286 for (fn, message) in bad:
1289 for (fn, message) in bad:
1287 matcher.bad(fn, encoding.strtolocal(message))
1290 matcher.bad(fn, encoding.strtolocal(message))
1288
1291
1289 status = scmutil.status(
1292 status = scmutil.status(
1290 modified=modified,
1293 modified=modified,
1291 added=added,
1294 added=added,
1292 removed=removed,
1295 removed=removed,
1293 deleted=deleted,
1296 deleted=deleted,
1294 unknown=unknown,
1297 unknown=unknown,
1295 ignored=ignored,
1298 ignored=ignored,
1296 clean=clean,
1299 clean=clean,
1297 )
1300 )
1298 return (lookup, status)
1301 return (lookup, status)
1299
1302
1300 def status(self, match, subrepos, ignored, clean, unknown):
1303 def status(self, match, subrepos, ignored, clean, unknown):
1301 """Determine the status of the working copy relative to the
1304 """Determine the status of the working copy relative to the
1302 dirstate and return a pair of (unsure, status), where status is of type
1305 dirstate and return a pair of (unsure, status), where status is of type
1303 scmutil.status and:
1306 scmutil.status and:
1304
1307
1305 unsure:
1308 unsure:
1306 files that might have been modified since the dirstate was
1309 files that might have been modified since the dirstate was
1307 written, but need to be read to be sure (size is the same
1310 written, but need to be read to be sure (size is the same
1308 but mtime differs)
1311 but mtime differs)
1309 status.modified:
1312 status.modified:
1310 files that have definitely been modified since the dirstate
1313 files that have definitely been modified since the dirstate
1311 was written (different size or mode)
1314 was written (different size or mode)
1312 status.clean:
1315 status.clean:
1313 files that have definitely not been modified since the
1316 files that have definitely not been modified since the
1314 dirstate was written
1317 dirstate was written
1315 """
1318 """
1316 listignored, listclean, listunknown = ignored, clean, unknown
1319 listignored, listclean, listunknown = ignored, clean, unknown
1317 lookup, modified, added, unknown, ignored = [], [], [], [], []
1320 lookup, modified, added, unknown, ignored = [], [], [], [], []
1318 removed, deleted, clean = [], [], []
1321 removed, deleted, clean = [], [], []
1319
1322
1320 dmap = self._map
1323 dmap = self._map
1321 dmap.preload()
1324 dmap.preload()
1322
1325
1323 use_rust = True
1326 use_rust = True
1324
1327
1325 allowed_matchers = (
1328 allowed_matchers = (
1326 matchmod.alwaysmatcher,
1329 matchmod.alwaysmatcher,
1327 matchmod.exactmatcher,
1330 matchmod.exactmatcher,
1328 matchmod.includematcher,
1331 matchmod.includematcher,
1329 )
1332 )
1330
1333
1331 if rustmod is None:
1334 if rustmod is None:
1332 use_rust = False
1335 use_rust = False
1333 elif self._checkcase:
1336 elif self._checkcase:
1334 # Case-insensitive filesystems are not handled yet
1337 # Case-insensitive filesystems are not handled yet
1335 use_rust = False
1338 use_rust = False
1336 elif subrepos:
1339 elif subrepos:
1337 use_rust = False
1340 use_rust = False
1338 elif sparse.enabled:
1341 elif sparse.enabled:
1339 use_rust = False
1342 use_rust = False
1340 elif not isinstance(match, allowed_matchers):
1343 elif not isinstance(match, allowed_matchers):
1341 # Some matchers have yet to be implemented
1344 # Some matchers have yet to be implemented
1342 use_rust = False
1345 use_rust = False
1343
1346
1344 if use_rust:
1347 if use_rust:
1345 try:
1348 try:
1346 return self._rust_status(
1349 return self._rust_status(
1347 match, listclean, listignored, listunknown
1350 match, listclean, listignored, listunknown
1348 )
1351 )
1349 except rustmod.FallbackError:
1352 except rustmod.FallbackError:
1350 pass
1353 pass
1351
1354
1352 def noop(f):
1355 def noop(f):
1353 pass
1356 pass
1354
1357
1355 dcontains = dmap.__contains__
1358 dcontains = dmap.__contains__
1356 dget = dmap.__getitem__
1359 dget = dmap.__getitem__
1357 ladd = lookup.append # aka "unsure"
1360 ladd = lookup.append # aka "unsure"
1358 madd = modified.append
1361 madd = modified.append
1359 aadd = added.append
1362 aadd = added.append
1360 uadd = unknown.append if listunknown else noop
1363 uadd = unknown.append if listunknown else noop
1361 iadd = ignored.append if listignored else noop
1364 iadd = ignored.append if listignored else noop
1362 radd = removed.append
1365 radd = removed.append
1363 dadd = deleted.append
1366 dadd = deleted.append
1364 cadd = clean.append if listclean else noop
1367 cadd = clean.append if listclean else noop
1365 mexact = match.exact
1368 mexact = match.exact
1366 dirignore = self._dirignore
1369 dirignore = self._dirignore
1367 checkexec = self._checkexec
1370 checkexec = self._checkexec
1368 copymap = self._map.copymap
1371 copymap = self._map.copymap
1369 lastnormaltime = self._lastnormaltime
1372 lastnormaltime = self._lastnormaltime
1370
1373
1371 # We need to do full walks when either
1374 # We need to do full walks when either
1372 # - we're listing all clean files, or
1375 # - we're listing all clean files, or
1373 # - match.traversedir does something, because match.traversedir should
1376 # - match.traversedir does something, because match.traversedir should
1374 # be called for every dir in the working dir
1377 # be called for every dir in the working dir
1375 full = listclean or match.traversedir is not None
1378 full = listclean or match.traversedir is not None
1376 for fn, st in pycompat.iteritems(
1379 for fn, st in pycompat.iteritems(
1377 self.walk(match, subrepos, listunknown, listignored, full=full)
1380 self.walk(match, subrepos, listunknown, listignored, full=full)
1378 ):
1381 ):
1379 if not dcontains(fn):
1382 if not dcontains(fn):
1380 if (listignored or mexact(fn)) and dirignore(fn):
1383 if (listignored or mexact(fn)) and dirignore(fn):
1381 if listignored:
1384 if listignored:
1382 iadd(fn)
1385 iadd(fn)
1383 else:
1386 else:
1384 uadd(fn)
1387 uadd(fn)
1385 continue
1388 continue
1386
1389
1387 t = dget(fn)
1390 t = dget(fn)
1388 mode = t.mode
1391 mode = t.mode
1389 size = t.size
1392 size = t.size
1390
1393
1391 if not st and t.tracked:
1394 if not st and t.tracked:
1392 dadd(fn)
1395 dadd(fn)
1393 elif t.p2_info:
1396 elif t.p2_info:
1394 madd(fn)
1397 madd(fn)
1395 elif t.added:
1398 elif t.added:
1396 aadd(fn)
1399 aadd(fn)
1397 elif t.removed:
1400 elif t.removed:
1398 radd(fn)
1401 radd(fn)
1399 elif t.tracked:
1402 elif t.tracked:
1400 if (
1403 if (
1401 size >= 0
1404 size >= 0
1402 and (
1405 and (
1403 (size != st.st_size and size != st.st_size & _rangemask)
1406 (size != st.st_size and size != st.st_size & _rangemask)
1404 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1407 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1405 )
1408 )
1406 or fn in copymap
1409 or fn in copymap
1407 ):
1410 ):
1408 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1411 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1409 # issue6456: Size returned may be longer due to
1412 # issue6456: Size returned may be longer due to
1410 # encryption on EXT-4 fscrypt, undecided.
1413 # encryption on EXT-4 fscrypt, undecided.
1411 ladd(fn)
1414 ladd(fn)
1412 else:
1415 else:
1413 madd(fn)
1416 madd(fn)
1414 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1417 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1415 ladd(fn)
1418 ladd(fn)
1416 elif timestamp.mtime_of(st) == lastnormaltime:
1419 elif timestamp.mtime_of(st) == lastnormaltime:
1417 # fn may have just been marked as normal and it may have
1420 # fn may have just been marked as normal and it may have
1418 # changed in the same second without changing its size.
1421 # changed in the same second without changing its size.
1419 # This can happen if we quickly do multiple commits.
1422 # This can happen if we quickly do multiple commits.
1420 # Force lookup, so we don't miss such a racy file change.
1423 # Force lookup, so we don't miss such a racy file change.
1421 ladd(fn)
1424 ladd(fn)
1422 elif listclean:
1425 elif listclean:
1423 cadd(fn)
1426 cadd(fn)
1424 status = scmutil.status(
1427 status = scmutil.status(
1425 modified, added, removed, deleted, unknown, ignored, clean
1428 modified, added, removed, deleted, unknown, ignored, clean
1426 )
1429 )
1427 return (lookup, status)
1430 return (lookup, status)
1428
1431
1429 def matches(self, match):
1432 def matches(self, match):
1430 """
1433 """
1431 return files in the dirstate (in whatever state) filtered by match
1434 return files in the dirstate (in whatever state) filtered by match
1432 """
1435 """
1433 dmap = self._map
1436 dmap = self._map
1434 if rustmod is not None:
1437 if rustmod is not None:
1435 dmap = self._map._map
1438 dmap = self._map._map
1436
1439
1437 if match.always():
1440 if match.always():
1438 return dmap.keys()
1441 return dmap.keys()
1439 files = match.files()
1442 files = match.files()
1440 if match.isexact():
1443 if match.isexact():
1441 # fast path -- filter the other way around, since typically files is
1444 # fast path -- filter the other way around, since typically files is
1442 # much smaller than dmap
1445 # much smaller than dmap
1443 return [f for f in files if f in dmap]
1446 return [f for f in files if f in dmap]
1444 if match.prefix() and all(fn in dmap for fn in files):
1447 if match.prefix() and all(fn in dmap for fn in files):
1445 # fast path -- all the values are known to be files, so just return
1448 # fast path -- all the values are known to be files, so just return
1446 # that
1449 # that
1447 return list(files)
1450 return list(files)
1448 return [f for f in dmap if match(f)]
1451 return [f for f in dmap if match(f)]
1449
1452
1450 def _actualfilename(self, tr):
1453 def _actualfilename(self, tr):
1451 if tr:
1454 if tr:
1452 return self._pendingfilename
1455 return self._pendingfilename
1453 else:
1456 else:
1454 return self._filename
1457 return self._filename
1455
1458
1456 def savebackup(self, tr, backupname):
1459 def savebackup(self, tr, backupname):
1457 '''Save current dirstate into backup file'''
1460 '''Save current dirstate into backup file'''
1458 filename = self._actualfilename(tr)
1461 filename = self._actualfilename(tr)
1459 assert backupname != filename
1462 assert backupname != filename
1460
1463
1461 # use '_writedirstate' instead of 'write' to write changes certainly,
1464 # use '_writedirstate' instead of 'write' to write changes certainly,
1462 # because the latter omits writing out if transaction is running.
1465 # because the latter omits writing out if transaction is running.
1463 # output file will be used to create backup of dirstate at this point.
1466 # output file will be used to create backup of dirstate at this point.
1464 if self._dirty or not self._opener.exists(filename):
1467 if self._dirty or not self._opener.exists(filename):
1465 self._writedirstate(
1468 self._writedirstate(
1466 tr,
1469 tr,
1467 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1470 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1468 )
1471 )
1469
1472
1470 if tr:
1473 if tr:
1471 # ensure that subsequent tr.writepending returns True for
1474 # ensure that subsequent tr.writepending returns True for
1472 # changes written out above, even if dirstate is never
1475 # changes written out above, even if dirstate is never
1473 # changed after this
1476 # changed after this
1474 tr.addfilegenerator(
1477 tr.addfilegenerator(
1475 b'dirstate',
1478 b'dirstate',
1476 (self._filename,),
1479 (self._filename,),
1477 lambda f: self._writedirstate(tr, f),
1480 lambda f: self._writedirstate(tr, f),
1478 location=b'plain',
1481 location=b'plain',
1479 )
1482 )
1480
1483
1481 # ensure that pending file written above is unlinked at
1484 # ensure that pending file written above is unlinked at
1482 # failure, even if tr.writepending isn't invoked until the
1485 # failure, even if tr.writepending isn't invoked until the
1483 # end of this transaction
1486 # end of this transaction
1484 tr.registertmp(filename, location=b'plain')
1487 tr.registertmp(filename, location=b'plain')
1485
1488
1486 self._opener.tryunlink(backupname)
1489 self._opener.tryunlink(backupname)
1487 # hardlink backup is okay because _writedirstate is always called
1490 # hardlink backup is okay because _writedirstate is always called
1488 # with an "atomictemp=True" file.
1491 # with an "atomictemp=True" file.
1489 util.copyfile(
1492 util.copyfile(
1490 self._opener.join(filename),
1493 self._opener.join(filename),
1491 self._opener.join(backupname),
1494 self._opener.join(backupname),
1492 hardlink=True,
1495 hardlink=True,
1493 )
1496 )
1494
1497
1495 def restorebackup(self, tr, backupname):
1498 def restorebackup(self, tr, backupname):
1496 '''Restore dirstate by backup file'''
1499 '''Restore dirstate by backup file'''
1497 # this "invalidate()" prevents "wlock.release()" from writing
1500 # this "invalidate()" prevents "wlock.release()" from writing
1498 # changes of dirstate out after restoring from backup file
1501 # changes of dirstate out after restoring from backup file
1499 self.invalidate()
1502 self.invalidate()
1500 filename = self._actualfilename(tr)
1503 filename = self._actualfilename(tr)
1501 o = self._opener
1504 o = self._opener
1502 if util.samefile(o.join(backupname), o.join(filename)):
1505 if util.samefile(o.join(backupname), o.join(filename)):
1503 o.unlink(backupname)
1506 o.unlink(backupname)
1504 else:
1507 else:
1505 o.rename(backupname, filename, checkambig=True)
1508 o.rename(backupname, filename, checkambig=True)
1506
1509
1507 def clearbackup(self, tr, backupname):
1510 def clearbackup(self, tr, backupname):
1508 '''Clear backup file'''
1511 '''Clear backup file'''
1509 self._opener.unlink(backupname)
1512 self._opener.unlink(backupname)
1510
1513
1511 def verify(self, m1, m2):
1514 def verify(self, m1, m2):
1512 """check the dirstate content again the parent manifest and yield errors"""
1515 """check the dirstate content again the parent manifest and yield errors"""
1513 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1516 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1514 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1517 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1515 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1518 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1516 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1519 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1517 for f, entry in self.items():
1520 for f, entry in self.items():
1518 state = entry.state
1521 state = entry.state
1519 if state in b"nr" and f not in m1:
1522 if state in b"nr" and f not in m1:
1520 yield (missing_from_p1, f, state)
1523 yield (missing_from_p1, f, state)
1521 if state in b"a" and f in m1:
1524 if state in b"a" and f in m1:
1522 yield (unexpected_in_p1, f, state)
1525 yield (unexpected_in_p1, f, state)
1523 if state in b"m" and f not in m1 and f not in m2:
1526 if state in b"m" and f not in m1 and f not in m2:
1524 yield (missing_from_ps, f, state)
1527 yield (missing_from_ps, f, state)
1525 for f in m1:
1528 for f in m1:
1526 state = self.get_entry(f).state
1529 state = self.get_entry(f).state
1527 if state not in b"nrm":
1530 if state not in b"nrm":
1528 yield (missing_from_ds, f, state)
1531 yield (missing_from_ds, f, state)
General Comments 0
You need to be logged in to leave comments. Login now