##// END OF EJS Templates
dirstate: add missing return on platforms without exec or symlink...
Raphaël Gomès -
r49102:8f54d9c7 default
parent child Browse files
Show More
@@ -1,1526 +1,1528 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .dirstateutils import (
34 from .dirstateutils import (
35 timestamp,
35 timestamp,
36 )
36 )
37
37
38 from .interfaces import (
38 from .interfaces import (
39 dirstate as intdirstate,
39 dirstate as intdirstate,
40 util as interfaceutil,
40 util as interfaceutil,
41 )
41 )
42
42
43 parsers = policy.importmod('parsers')
43 parsers = policy.importmod('parsers')
44 rustmod = policy.importrust('dirstate')
44 rustmod = policy.importrust('dirstate')
45
45
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47
47
48 propertycache = util.propertycache
48 propertycache = util.propertycache
49 filecache = scmutil.filecache
49 filecache = scmutil.filecache
50 _rangemask = dirstatemap.rangemask
50 _rangemask = dirstatemap.rangemask
51
51
52 DirstateItem = dirstatemap.DirstateItem
52 DirstateItem = dirstatemap.DirstateItem
53
53
54
54
55 class repocache(filecache):
55 class repocache(filecache):
56 """filecache for files in .hg/"""
56 """filecache for files in .hg/"""
57
57
58 def join(self, obj, fname):
58 def join(self, obj, fname):
59 return obj._opener.join(fname)
59 return obj._opener.join(fname)
60
60
61
61
62 class rootcache(filecache):
62 class rootcache(filecache):
63 """filecache for files in the repository root"""
63 """filecache for files in the repository root"""
64
64
65 def join(self, obj, fname):
65 def join(self, obj, fname):
66 return obj._join(fname)
66 return obj._join(fname)
67
67
68
68
69 def _getfsnow(vfs):
69 def _getfsnow(vfs):
70 '''Get "now" timestamp on filesystem'''
70 '''Get "now" timestamp on filesystem'''
71 tmpfd, tmpname = vfs.mkstemp()
71 tmpfd, tmpname = vfs.mkstemp()
72 try:
72 try:
73 return timestamp.mtime_of(os.fstat(tmpfd))
73 return timestamp.mtime_of(os.fstat(tmpfd))
74 finally:
74 finally:
75 os.close(tmpfd)
75 os.close(tmpfd)
76 vfs.unlink(tmpname)
76 vfs.unlink(tmpname)
77
77
78
78
79 def requires_parents_change(func):
79 def requires_parents_change(func):
80 def wrap(self, *args, **kwargs):
80 def wrap(self, *args, **kwargs):
81 if not self.pendingparentchange():
81 if not self.pendingparentchange():
82 msg = 'calling `%s` outside of a parentchange context'
82 msg = 'calling `%s` outside of a parentchange context'
83 msg %= func.__name__
83 msg %= func.__name__
84 raise error.ProgrammingError(msg)
84 raise error.ProgrammingError(msg)
85 return func(self, *args, **kwargs)
85 return func(self, *args, **kwargs)
86
86
87 return wrap
87 return wrap
88
88
89
89
90 def requires_no_parents_change(func):
90 def requires_no_parents_change(func):
91 def wrap(self, *args, **kwargs):
91 def wrap(self, *args, **kwargs):
92 if self.pendingparentchange():
92 if self.pendingparentchange():
93 msg = 'calling `%s` inside of a parentchange context'
93 msg = 'calling `%s` inside of a parentchange context'
94 msg %= func.__name__
94 msg %= func.__name__
95 raise error.ProgrammingError(msg)
95 raise error.ProgrammingError(msg)
96 return func(self, *args, **kwargs)
96 return func(self, *args, **kwargs)
97
97
98 return wrap
98 return wrap
99
99
100
100
101 @interfaceutil.implementer(intdirstate.idirstate)
101 @interfaceutil.implementer(intdirstate.idirstate)
102 class dirstate(object):
102 class dirstate(object):
103 def __init__(
103 def __init__(
104 self,
104 self,
105 opener,
105 opener,
106 ui,
106 ui,
107 root,
107 root,
108 validate,
108 validate,
109 sparsematchfn,
109 sparsematchfn,
110 nodeconstants,
110 nodeconstants,
111 use_dirstate_v2,
111 use_dirstate_v2,
112 ):
112 ):
113 """Create a new dirstate object.
113 """Create a new dirstate object.
114
114
115 opener is an open()-like callable that can be used to open the
115 opener is an open()-like callable that can be used to open the
116 dirstate file; root is the root of the directory tracked by
116 dirstate file; root is the root of the directory tracked by
117 the dirstate.
117 the dirstate.
118 """
118 """
119 self._use_dirstate_v2 = use_dirstate_v2
119 self._use_dirstate_v2 = use_dirstate_v2
120 self._nodeconstants = nodeconstants
120 self._nodeconstants = nodeconstants
121 self._opener = opener
121 self._opener = opener
122 self._validate = validate
122 self._validate = validate
123 self._root = root
123 self._root = root
124 self._sparsematchfn = sparsematchfn
124 self._sparsematchfn = sparsematchfn
125 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
125 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
126 # UNC path pointing to root share (issue4557)
126 # UNC path pointing to root share (issue4557)
127 self._rootdir = pathutil.normasprefix(root)
127 self._rootdir = pathutil.normasprefix(root)
128 self._dirty = False
128 self._dirty = False
129 self._lastnormaltime = timestamp.zero()
129 self._lastnormaltime = timestamp.zero()
130 self._ui = ui
130 self._ui = ui
131 self._filecache = {}
131 self._filecache = {}
132 self._parentwriters = 0
132 self._parentwriters = 0
133 self._filename = b'dirstate'
133 self._filename = b'dirstate'
134 self._pendingfilename = b'%s.pending' % self._filename
134 self._pendingfilename = b'%s.pending' % self._filename
135 self._plchangecallbacks = {}
135 self._plchangecallbacks = {}
136 self._origpl = None
136 self._origpl = None
137 self._mapcls = dirstatemap.dirstatemap
137 self._mapcls = dirstatemap.dirstatemap
138 # Access and cache cwd early, so we don't access it for the first time
138 # Access and cache cwd early, so we don't access it for the first time
139 # after a working-copy update caused it to not exist (accessing it then
139 # after a working-copy update caused it to not exist (accessing it then
140 # raises an exception).
140 # raises an exception).
141 self._cwd
141 self._cwd
142
142
143 def prefetch_parents(self):
143 def prefetch_parents(self):
144 """make sure the parents are loaded
144 """make sure the parents are loaded
145
145
146 Used to avoid a race condition.
146 Used to avoid a race condition.
147 """
147 """
148 self._pl
148 self._pl
149
149
150 @contextlib.contextmanager
150 @contextlib.contextmanager
151 def parentchange(self):
151 def parentchange(self):
152 """Context manager for handling dirstate parents.
152 """Context manager for handling dirstate parents.
153
153
154 If an exception occurs in the scope of the context manager,
154 If an exception occurs in the scope of the context manager,
155 the incoherent dirstate won't be written when wlock is
155 the incoherent dirstate won't be written when wlock is
156 released.
156 released.
157 """
157 """
158 self._parentwriters += 1
158 self._parentwriters += 1
159 yield
159 yield
160 # Typically we want the "undo" step of a context manager in a
160 # Typically we want the "undo" step of a context manager in a
161 # finally block so it happens even when an exception
161 # finally block so it happens even when an exception
162 # occurs. In this case, however, we only want to decrement
162 # occurs. In this case, however, we only want to decrement
163 # parentwriters if the code in the with statement exits
163 # parentwriters if the code in the with statement exits
164 # normally, so we don't have a try/finally here on purpose.
164 # normally, so we don't have a try/finally here on purpose.
165 self._parentwriters -= 1
165 self._parentwriters -= 1
166
166
167 def pendingparentchange(self):
167 def pendingparentchange(self):
168 """Returns true if the dirstate is in the middle of a set of changes
168 """Returns true if the dirstate is in the middle of a set of changes
169 that modify the dirstate parent.
169 that modify the dirstate parent.
170 """
170 """
171 return self._parentwriters > 0
171 return self._parentwriters > 0
172
172
173 @propertycache
173 @propertycache
174 def _map(self):
174 def _map(self):
175 """Return the dirstate contents (see documentation for dirstatemap)."""
175 """Return the dirstate contents (see documentation for dirstatemap)."""
176 self._map = self._mapcls(
176 self._map = self._mapcls(
177 self._ui,
177 self._ui,
178 self._opener,
178 self._opener,
179 self._root,
179 self._root,
180 self._nodeconstants,
180 self._nodeconstants,
181 self._use_dirstate_v2,
181 self._use_dirstate_v2,
182 )
182 )
183 return self._map
183 return self._map
184
184
185 @property
185 @property
186 def _sparsematcher(self):
186 def _sparsematcher(self):
187 """The matcher for the sparse checkout.
187 """The matcher for the sparse checkout.
188
188
189 The working directory may not include every file from a manifest. The
189 The working directory may not include every file from a manifest. The
190 matcher obtained by this property will match a path if it is to be
190 matcher obtained by this property will match a path if it is to be
191 included in the working directory.
191 included in the working directory.
192 """
192 """
193 # TODO there is potential to cache this property. For now, the matcher
193 # TODO there is potential to cache this property. For now, the matcher
194 # is resolved on every access. (But the called function does use a
194 # is resolved on every access. (But the called function does use a
195 # cache to keep the lookup fast.)
195 # cache to keep the lookup fast.)
196 return self._sparsematchfn()
196 return self._sparsematchfn()
197
197
198 @repocache(b'branch')
198 @repocache(b'branch')
199 def _branch(self):
199 def _branch(self):
200 try:
200 try:
201 return self._opener.read(b"branch").strip() or b"default"
201 return self._opener.read(b"branch").strip() or b"default"
202 except IOError as inst:
202 except IOError as inst:
203 if inst.errno != errno.ENOENT:
203 if inst.errno != errno.ENOENT:
204 raise
204 raise
205 return b"default"
205 return b"default"
206
206
207 @property
207 @property
208 def _pl(self):
208 def _pl(self):
209 return self._map.parents()
209 return self._map.parents()
210
210
211 def hasdir(self, d):
211 def hasdir(self, d):
212 return self._map.hastrackeddir(d)
212 return self._map.hastrackeddir(d)
213
213
214 @rootcache(b'.hgignore')
214 @rootcache(b'.hgignore')
215 def _ignore(self):
215 def _ignore(self):
216 files = self._ignorefiles()
216 files = self._ignorefiles()
217 if not files:
217 if not files:
218 return matchmod.never()
218 return matchmod.never()
219
219
220 pats = [b'include:%s' % f for f in files]
220 pats = [b'include:%s' % f for f in files]
221 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
221 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
222
222
223 @propertycache
223 @propertycache
224 def _slash(self):
224 def _slash(self):
225 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
225 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
226
226
227 @propertycache
227 @propertycache
228 def _checklink(self):
228 def _checklink(self):
229 return util.checklink(self._root)
229 return util.checklink(self._root)
230
230
231 @propertycache
231 @propertycache
232 def _checkexec(self):
232 def _checkexec(self):
233 return bool(util.checkexec(self._root))
233 return bool(util.checkexec(self._root))
234
234
235 @propertycache
235 @propertycache
236 def _checkcase(self):
236 def _checkcase(self):
237 return not util.fscasesensitive(self._join(b'.hg'))
237 return not util.fscasesensitive(self._join(b'.hg'))
238
238
239 def _join(self, f):
239 def _join(self, f):
240 # much faster than os.path.join()
240 # much faster than os.path.join()
241 # it's safe because f is always a relative path
241 # it's safe because f is always a relative path
242 return self._rootdir + f
242 return self._rootdir + f
243
243
244 def flagfunc(self, buildfallback):
244 def flagfunc(self, buildfallback):
245 if self._checklink and self._checkexec:
245 if self._checklink and self._checkexec:
246
246
247 def f(x):
247 def f(x):
248 try:
248 try:
249 st = os.lstat(self._join(x))
249 st = os.lstat(self._join(x))
250 if util.statislink(st):
250 if util.statislink(st):
251 return b'l'
251 return b'l'
252 if util.statisexec(st):
252 if util.statisexec(st):
253 return b'x'
253 return b'x'
254 except OSError:
254 except OSError:
255 pass
255 pass
256 return b''
256 return b''
257
257
258 return f
258 return f
259
259
260 fallback = buildfallback()
260 fallback = buildfallback()
261 if self._checklink:
261 if self._checklink:
262
262
263 def f(x):
263 def f(x):
264 if os.path.islink(self._join(x)):
264 if os.path.islink(self._join(x)):
265 return b'l'
265 return b'l'
266 entry = self.get_entry(x)
266 entry = self.get_entry(x)
267 if entry.has_fallback_exec:
267 if entry.has_fallback_exec:
268 if entry.fallback_exec:
268 if entry.fallback_exec:
269 return b'x'
269 return b'x'
270 elif b'x' in fallback(x):
270 elif b'x' in fallback(x):
271 return b'x'
271 return b'x'
272 return b''
272 return b''
273
273
274 return f
274 return f
275 if self._checkexec:
275 if self._checkexec:
276
276
277 def f(x):
277 def f(x):
278 if b'l' in fallback(x):
278 if b'l' in fallback(x):
279 return b'l'
279 return b'l'
280 entry = self.get_entry(x)
280 entry = self.get_entry(x)
281 if entry.has_fallback_symlink:
281 if entry.has_fallback_symlink:
282 if entry.fallback_symlink:
282 if entry.fallback_symlink:
283 return b'l'
283 return b'l'
284 if util.isexec(self._join(x)):
284 if util.isexec(self._join(x)):
285 return b'x'
285 return b'x'
286 return b''
286 return b''
287
287
288 return f
288 return f
289 else:
289 else:
290
290
291 def f(x):
291 def f(x):
292 entry = self.get_entry(x)
292 entry = self.get_entry(x)
293 if entry.has_fallback_symlink:
293 if entry.has_fallback_symlink:
294 if entry.fallback_symlink:
294 if entry.fallback_symlink:
295 return b'l'
295 return b'l'
296 if entry.has_fallback_exec:
296 if entry.has_fallback_exec:
297 if entry.fallback_exec:
297 if entry.fallback_exec:
298 return b'x'
298 return b'x'
299 elif entry.has_fallback_symlink:
299 elif entry.has_fallback_symlink:
300 return b''
300 return b''
301 return fallback(x)
301 return fallback(x)
302
302
303 return f
304
303 @propertycache
305 @propertycache
304 def _cwd(self):
306 def _cwd(self):
305 # internal config: ui.forcecwd
307 # internal config: ui.forcecwd
306 forcecwd = self._ui.config(b'ui', b'forcecwd')
308 forcecwd = self._ui.config(b'ui', b'forcecwd')
307 if forcecwd:
309 if forcecwd:
308 return forcecwd
310 return forcecwd
309 return encoding.getcwd()
311 return encoding.getcwd()
310
312
311 def getcwd(self):
313 def getcwd(self):
312 """Return the path from which a canonical path is calculated.
314 """Return the path from which a canonical path is calculated.
313
315
314 This path should be used to resolve file patterns or to convert
316 This path should be used to resolve file patterns or to convert
315 canonical paths back to file paths for display. It shouldn't be
317 canonical paths back to file paths for display. It shouldn't be
316 used to get real file paths. Use vfs functions instead.
318 used to get real file paths. Use vfs functions instead.
317 """
319 """
318 cwd = self._cwd
320 cwd = self._cwd
319 if cwd == self._root:
321 if cwd == self._root:
320 return b''
322 return b''
321 # self._root ends with a path separator if self._root is '/' or 'C:\'
323 # self._root ends with a path separator if self._root is '/' or 'C:\'
322 rootsep = self._root
324 rootsep = self._root
323 if not util.endswithsep(rootsep):
325 if not util.endswithsep(rootsep):
324 rootsep += pycompat.ossep
326 rootsep += pycompat.ossep
325 if cwd.startswith(rootsep):
327 if cwd.startswith(rootsep):
326 return cwd[len(rootsep) :]
328 return cwd[len(rootsep) :]
327 else:
329 else:
328 # we're outside the repo. return an absolute path.
330 # we're outside the repo. return an absolute path.
329 return cwd
331 return cwd
330
332
331 def pathto(self, f, cwd=None):
333 def pathto(self, f, cwd=None):
332 if cwd is None:
334 if cwd is None:
333 cwd = self.getcwd()
335 cwd = self.getcwd()
334 path = util.pathto(self._root, cwd, f)
336 path = util.pathto(self._root, cwd, f)
335 if self._slash:
337 if self._slash:
336 return util.pconvert(path)
338 return util.pconvert(path)
337 return path
339 return path
338
340
339 def __getitem__(self, key):
341 def __getitem__(self, key):
340 """Return the current state of key (a filename) in the dirstate.
342 """Return the current state of key (a filename) in the dirstate.
341
343
342 States are:
344 States are:
343 n normal
345 n normal
344 m needs merging
346 m needs merging
345 r marked for removal
347 r marked for removal
346 a marked for addition
348 a marked for addition
347 ? not tracked
349 ? not tracked
348
350
349 XXX The "state" is a bit obscure to be in the "public" API. we should
351 XXX The "state" is a bit obscure to be in the "public" API. we should
350 consider migrating all user of this to going through the dirstate entry
352 consider migrating all user of this to going through the dirstate entry
351 instead.
353 instead.
352 """
354 """
353 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
355 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
354 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
356 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
355 entry = self._map.get(key)
357 entry = self._map.get(key)
356 if entry is not None:
358 if entry is not None:
357 return entry.state
359 return entry.state
358 return b'?'
360 return b'?'
359
361
360 def get_entry(self, path):
362 def get_entry(self, path):
361 """return a DirstateItem for the associated path"""
363 """return a DirstateItem for the associated path"""
362 entry = self._map.get(path)
364 entry = self._map.get(path)
363 if entry is None:
365 if entry is None:
364 return DirstateItem()
366 return DirstateItem()
365 return entry
367 return entry
366
368
367 def __contains__(self, key):
369 def __contains__(self, key):
368 return key in self._map
370 return key in self._map
369
371
370 def __iter__(self):
372 def __iter__(self):
371 return iter(sorted(self._map))
373 return iter(sorted(self._map))
372
374
373 def items(self):
375 def items(self):
374 return pycompat.iteritems(self._map)
376 return pycompat.iteritems(self._map)
375
377
376 iteritems = items
378 iteritems = items
377
379
378 def parents(self):
380 def parents(self):
379 return [self._validate(p) for p in self._pl]
381 return [self._validate(p) for p in self._pl]
380
382
381 def p1(self):
383 def p1(self):
382 return self._validate(self._pl[0])
384 return self._validate(self._pl[0])
383
385
384 def p2(self):
386 def p2(self):
385 return self._validate(self._pl[1])
387 return self._validate(self._pl[1])
386
388
387 @property
389 @property
388 def in_merge(self):
390 def in_merge(self):
389 """True if a merge is in progress"""
391 """True if a merge is in progress"""
390 return self._pl[1] != self._nodeconstants.nullid
392 return self._pl[1] != self._nodeconstants.nullid
391
393
392 def branch(self):
394 def branch(self):
393 return encoding.tolocal(self._branch)
395 return encoding.tolocal(self._branch)
394
396
395 def setparents(self, p1, p2=None):
397 def setparents(self, p1, p2=None):
396 """Set dirstate parents to p1 and p2.
398 """Set dirstate parents to p1 and p2.
397
399
398 When moving from two parents to one, "merged" entries are
400 When moving from two parents to one, "merged" entries are
399 adjusted to normal and previous copy records discarded and
401 adjusted to normal and previous copy records discarded and
400 returned by the call.
402 returned by the call.
401
403
402 See localrepo.setparents()
404 See localrepo.setparents()
403 """
405 """
404 if p2 is None:
406 if p2 is None:
405 p2 = self._nodeconstants.nullid
407 p2 = self._nodeconstants.nullid
406 if self._parentwriters == 0:
408 if self._parentwriters == 0:
407 raise ValueError(
409 raise ValueError(
408 b"cannot set dirstate parent outside of "
410 b"cannot set dirstate parent outside of "
409 b"dirstate.parentchange context manager"
411 b"dirstate.parentchange context manager"
410 )
412 )
411
413
412 self._dirty = True
414 self._dirty = True
413 oldp2 = self._pl[1]
415 oldp2 = self._pl[1]
414 if self._origpl is None:
416 if self._origpl is None:
415 self._origpl = self._pl
417 self._origpl = self._pl
416 nullid = self._nodeconstants.nullid
418 nullid = self._nodeconstants.nullid
417 # True if we need to fold p2 related state back to a linear case
419 # True if we need to fold p2 related state back to a linear case
418 fold_p2 = oldp2 != nullid and p2 == nullid
420 fold_p2 = oldp2 != nullid and p2 == nullid
419 return self._map.setparents(p1, p2, fold_p2=fold_p2)
421 return self._map.setparents(p1, p2, fold_p2=fold_p2)
420
422
421 def setbranch(self, branch):
423 def setbranch(self, branch):
422 self.__class__._branch.set(self, encoding.fromlocal(branch))
424 self.__class__._branch.set(self, encoding.fromlocal(branch))
423 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
425 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
424 try:
426 try:
425 f.write(self._branch + b'\n')
427 f.write(self._branch + b'\n')
426 f.close()
428 f.close()
427
429
428 # make sure filecache has the correct stat info for _branch after
430 # make sure filecache has the correct stat info for _branch after
429 # replacing the underlying file
431 # replacing the underlying file
430 ce = self._filecache[b'_branch']
432 ce = self._filecache[b'_branch']
431 if ce:
433 if ce:
432 ce.refresh()
434 ce.refresh()
433 except: # re-raises
435 except: # re-raises
434 f.discard()
436 f.discard()
435 raise
437 raise
436
438
437 def invalidate(self):
439 def invalidate(self):
438 """Causes the next access to reread the dirstate.
440 """Causes the next access to reread the dirstate.
439
441
440 This is different from localrepo.invalidatedirstate() because it always
442 This is different from localrepo.invalidatedirstate() because it always
441 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
443 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
442 check whether the dirstate has changed before rereading it."""
444 check whether the dirstate has changed before rereading it."""
443
445
444 for a in ("_map", "_branch", "_ignore"):
446 for a in ("_map", "_branch", "_ignore"):
445 if a in self.__dict__:
447 if a in self.__dict__:
446 delattr(self, a)
448 delattr(self, a)
447 self._lastnormaltime = timestamp.zero()
449 self._lastnormaltime = timestamp.zero()
448 self._dirty = False
450 self._dirty = False
449 self._parentwriters = 0
451 self._parentwriters = 0
450 self._origpl = None
452 self._origpl = None
451
453
452 def copy(self, source, dest):
454 def copy(self, source, dest):
453 """Mark dest as a copy of source. Unmark dest if source is None."""
455 """Mark dest as a copy of source. Unmark dest if source is None."""
454 if source == dest:
456 if source == dest:
455 return
457 return
456 self._dirty = True
458 self._dirty = True
457 if source is not None:
459 if source is not None:
458 self._map.copymap[dest] = source
460 self._map.copymap[dest] = source
459 else:
461 else:
460 self._map.copymap.pop(dest, None)
462 self._map.copymap.pop(dest, None)
461
463
462 def copied(self, file):
464 def copied(self, file):
463 return self._map.copymap.get(file, None)
465 return self._map.copymap.get(file, None)
464
466
465 def copies(self):
467 def copies(self):
466 return self._map.copymap
468 return self._map.copymap
467
469
468 @requires_no_parents_change
470 @requires_no_parents_change
469 def set_tracked(self, filename):
471 def set_tracked(self, filename):
470 """a "public" method for generic code to mark a file as tracked
472 """a "public" method for generic code to mark a file as tracked
471
473
472 This function is to be called outside of "update/merge" case. For
474 This function is to be called outside of "update/merge" case. For
473 example by a command like `hg add X`.
475 example by a command like `hg add X`.
474
476
475 return True if the file was previously untracked, False otherwise.
477 return True if the file was previously untracked, False otherwise.
476 """
478 """
477 self._dirty = True
479 self._dirty = True
478 entry = self._map.get(filename)
480 entry = self._map.get(filename)
479 if entry is None or not entry.tracked:
481 if entry is None or not entry.tracked:
480 self._check_new_tracked_filename(filename)
482 self._check_new_tracked_filename(filename)
481 return self._map.set_tracked(filename)
483 return self._map.set_tracked(filename)
482
484
483 @requires_no_parents_change
485 @requires_no_parents_change
484 def set_untracked(self, filename):
486 def set_untracked(self, filename):
485 """a "public" method for generic code to mark a file as untracked
487 """a "public" method for generic code to mark a file as untracked
486
488
487 This function is to be called outside of "update/merge" case. For
489 This function is to be called outside of "update/merge" case. For
488 example by a command like `hg remove X`.
490 example by a command like `hg remove X`.
489
491
490 return True if the file was previously tracked, False otherwise.
492 return True if the file was previously tracked, False otherwise.
491 """
493 """
492 ret = self._map.set_untracked(filename)
494 ret = self._map.set_untracked(filename)
493 if ret:
495 if ret:
494 self._dirty = True
496 self._dirty = True
495 return ret
497 return ret
496
498
497 @requires_no_parents_change
499 @requires_no_parents_change
498 def set_clean(self, filename, parentfiledata=None):
500 def set_clean(self, filename, parentfiledata=None):
499 """record that the current state of the file on disk is known to be clean"""
501 """record that the current state of the file on disk is known to be clean"""
500 self._dirty = True
502 self._dirty = True
501 if parentfiledata:
503 if parentfiledata:
502 (mode, size, mtime) = parentfiledata
504 (mode, size, mtime) = parentfiledata
503 else:
505 else:
504 (mode, size, mtime) = self._get_filedata(filename)
506 (mode, size, mtime) = self._get_filedata(filename)
505 if not self._map[filename].tracked:
507 if not self._map[filename].tracked:
506 self._check_new_tracked_filename(filename)
508 self._check_new_tracked_filename(filename)
507 self._map.set_clean(filename, mode, size, mtime)
509 self._map.set_clean(filename, mode, size, mtime)
508 if mtime > self._lastnormaltime:
510 if mtime > self._lastnormaltime:
509 # Remember the most recent modification timeslot for status(),
511 # Remember the most recent modification timeslot for status(),
510 # to make sure we won't miss future size-preserving file content
512 # to make sure we won't miss future size-preserving file content
511 # modifications that happen within the same timeslot.
513 # modifications that happen within the same timeslot.
512 self._lastnormaltime = mtime
514 self._lastnormaltime = mtime
513
515
514 @requires_no_parents_change
516 @requires_no_parents_change
515 def set_possibly_dirty(self, filename):
517 def set_possibly_dirty(self, filename):
516 """record that the current state of the file on disk is unknown"""
518 """record that the current state of the file on disk is unknown"""
517 self._dirty = True
519 self._dirty = True
518 self._map.set_possibly_dirty(filename)
520 self._map.set_possibly_dirty(filename)
519
521
520 @requires_parents_change
522 @requires_parents_change
521 def update_file_p1(
523 def update_file_p1(
522 self,
524 self,
523 filename,
525 filename,
524 p1_tracked,
526 p1_tracked,
525 ):
527 ):
526 """Set a file as tracked in the parent (or not)
528 """Set a file as tracked in the parent (or not)
527
529
528 This is to be called when adjusting the dirstate to a new parent after a history
530 This is to be called when adjusting the dirstate to a new parent after a history
529 rewriting operation.
531 rewriting operation.
530
532
531 It should not be called during a merge (p2 != nullid) and only within
533 It should not be called during a merge (p2 != nullid) and only within
532 a `with dirstate.parentchange():` context.
534 a `with dirstate.parentchange():` context.
533 """
535 """
534 if self.in_merge:
536 if self.in_merge:
535 msg = b'update_file_reference should not be called when merging'
537 msg = b'update_file_reference should not be called when merging'
536 raise error.ProgrammingError(msg)
538 raise error.ProgrammingError(msg)
537 entry = self._map.get(filename)
539 entry = self._map.get(filename)
538 if entry is None:
540 if entry is None:
539 wc_tracked = False
541 wc_tracked = False
540 else:
542 else:
541 wc_tracked = entry.tracked
543 wc_tracked = entry.tracked
542 if not (p1_tracked or wc_tracked):
544 if not (p1_tracked or wc_tracked):
543 # the file is no longer relevant to anyone
545 # the file is no longer relevant to anyone
544 if self._map.get(filename) is not None:
546 if self._map.get(filename) is not None:
545 self._map.reset_state(filename)
547 self._map.reset_state(filename)
546 self._dirty = True
548 self._dirty = True
547 elif (not p1_tracked) and wc_tracked:
549 elif (not p1_tracked) and wc_tracked:
548 if entry is not None and entry.added:
550 if entry is not None and entry.added:
549 return # avoid dropping copy information (maybe?)
551 return # avoid dropping copy information (maybe?)
550
552
551 parentfiledata = None
553 parentfiledata = None
552 if wc_tracked and p1_tracked:
554 if wc_tracked and p1_tracked:
553 parentfiledata = self._get_filedata(filename)
555 parentfiledata = self._get_filedata(filename)
554
556
555 self._map.reset_state(
557 self._map.reset_state(
556 filename,
558 filename,
557 wc_tracked,
559 wc_tracked,
558 p1_tracked,
560 p1_tracked,
559 # the underlying reference might have changed, we will have to
561 # the underlying reference might have changed, we will have to
560 # check it.
562 # check it.
561 has_meaningful_mtime=False,
563 has_meaningful_mtime=False,
562 parentfiledata=parentfiledata,
564 parentfiledata=parentfiledata,
563 )
565 )
564 if (
566 if (
565 parentfiledata is not None
567 parentfiledata is not None
566 and parentfiledata[2] > self._lastnormaltime
568 and parentfiledata[2] > self._lastnormaltime
567 ):
569 ):
568 # Remember the most recent modification timeslot for status(),
570 # Remember the most recent modification timeslot for status(),
569 # to make sure we won't miss future size-preserving file content
571 # to make sure we won't miss future size-preserving file content
570 # modifications that happen within the same timeslot.
572 # modifications that happen within the same timeslot.
571 self._lastnormaltime = parentfiledata[2]
573 self._lastnormaltime = parentfiledata[2]
572
574
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes to keep track
        of what is the file situation in regards to the working copy and its
        parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True

        # mode/size/mtime are only trustworthy when the file is clean in both
        # the working copy and p1 and not involved with p2
        need_parent_file_data = (
            not possibly_dirty and not p2_info and wc_tracked and p1_tracked
        )

        if need_parent_file_data and parentfiledata is None:
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
624
626
625 def _check_new_tracked_filename(self, filename):
627 def _check_new_tracked_filename(self, filename):
626 scmutil.checkfilename(filename)
628 scmutil.checkfilename(filename)
627 if self._map.hastrackeddir(filename):
629 if self._map.hastrackeddir(filename):
628 msg = _(b'directory %r already in dirstate')
630 msg = _(b'directory %r already in dirstate')
629 msg %= pycompat.bytestr(filename)
631 msg %= pycompat.bytestr(filename)
630 raise error.Abort(msg)
632 raise error.Abort(msg)
631 # shadows
633 # shadows
632 for d in pathutil.finddirs(filename):
634 for d in pathutil.finddirs(filename):
633 if self._map.hastrackeddir(d):
635 if self._map.hastrackeddir(d):
634 break
636 break
635 entry = self._map.get(d)
637 entry = self._map.get(d)
636 if entry is not None and not entry.removed:
638 if entry is not None and not entry.removed:
637 msg = _(b'file %r in dirstate clashes with %r')
639 msg = _(b'file %r in dirstate clashes with %r')
638 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
640 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
639 raise error.Abort(msg)
641 raise error.Abort(msg)
640
642
641 def _get_filedata(self, filename):
643 def _get_filedata(self, filename):
642 """returns"""
644 """returns"""
643 s = os.lstat(self._join(filename))
645 s = os.lstat(self._join(filename))
644 mode = s.st_mode
646 mode = s.st_mode
645 size = s.st_size
647 size = s.st_size
646 mtime = timestamp.mtime_of(s)
648 mtime = timestamp.mtime_of(s)
647 return (mode, size, mtime)
649 return (mode, size, mtime)
648
650
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Discover the canonical on-disk casing for `path` and cache it.

        `normed` is the case-normalized form of `path`; `storemap` is the
        fold map to record the result in when the path exists on disk.
        `exists` may be None (then it is looked up) or a known boolean.
        Directory components are normalized recursively via self._normalize.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist on disk
            storemap[normed] = folded

        return folded
674
676
675 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
677 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
676 normed = util.normcase(path)
678 normed = util.normcase(path)
677 folded = self._map.filefoldmap.get(normed, None)
679 folded = self._map.filefoldmap.get(normed, None)
678 if folded is None:
680 if folded is None:
679 if isknown:
681 if isknown:
680 folded = path
682 folded = path
681 else:
683 else:
682 folded = self._discoverpath(
684 folded = self._discoverpath(
683 path, normed, ignoremissing, exists, self._map.filefoldmap
685 path, normed, ignoremissing, exists, self._map.filefoldmap
684 )
686 )
685 return folded
687 return folded
686
688
687 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
689 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
688 normed = util.normcase(path)
690 normed = util.normcase(path)
689 folded = self._map.filefoldmap.get(normed, None)
691 folded = self._map.filefoldmap.get(normed, None)
690 if folded is None:
692 if folded is None:
691 folded = self._map.dirfoldmap.get(normed, None)
693 folded = self._map.dirfoldmap.get(normed, None)
692 if folded is None:
694 if folded is None:
693 if isknown:
695 if isknown:
694 folded = path
696 folded = path
695 else:
697 else:
696 # store discovered result in dirfoldmap so that future
698 # store discovered result in dirfoldmap so that future
697 # normalizefile calls don't start matching directories
699 # normalizefile calls don't start matching directories
698 folded = self._discoverpath(
700 folded = self._discoverpath(
699 path, normed, ignoremissing, exists, self._map.dirfoldmap
701 path, normed, ignoremissing, exists, self._map.dirfoldmap
700 )
702 )
701 return folded
703 return folded
702
704
703 def normalize(self, path, isknown=False, ignoremissing=False):
705 def normalize(self, path, isknown=False, ignoremissing=False):
704 """
706 """
705 normalize the case of a pathname when on a casefolding filesystem
707 normalize the case of a pathname when on a casefolding filesystem
706
708
707 isknown specifies whether the filename came from walking the
709 isknown specifies whether the filename came from walking the
708 disk, to avoid extra filesystem access.
710 disk, to avoid extra filesystem access.
709
711
710 If ignoremissing is True, missing path are returned
712 If ignoremissing is True, missing path are returned
711 unchanged. Otherwise, we try harder to normalize possibly
713 unchanged. Otherwise, we try harder to normalize possibly
712 existing path components.
714 existing path components.
713
715
714 The normalized case is determined based on the following precedence:
716 The normalized case is determined based on the following precedence:
715
717
716 - version of name already stored in the dirstate
718 - version of name already stored in the dirstate
717 - version of name stored on disk
719 - version of name stored on disk
718 - version provided via command arguments
720 - version provided via command arguments
719 """
721 """
720
722
721 if self._checkcase:
723 if self._checkcase:
722 return self._normalize(path, isknown, ignoremissing)
724 return self._normalize(path, isknown, ignoremissing)
723 return path
725 return path
724
726
    def clear(self):
        """Empty the in-memory dirstate and mark it dirty for the next write."""
        self._map.clear()
        # forget the last seen "normal" mtime along with the entries
        self._lastnormaltime = timestamp.zero()
        self._dirty = True
729
731
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild dirstate entries against revision `parent`.

        With changedfiles=None the whole dirstate is rebuilt from `allfiles`;
        otherwise only the listed files are refreshed (looked up if still in
        `allfiles`, dropped if not).
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the rebuild
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        # remember the pre-rebuild parents so parent-change callbacks fire
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:

            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
771
773
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
779
781
    def write(self, tr):
        """Write the dirstate to disk, or defer the write to transaction `tr`.

        No-op when there are no pending in-memory changes.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # record when mtime start to be ambiguous
            now = _getfsnow(self._opener)

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f, now=now),
                location=b'plain',
            )
            return

        # no transaction: write immediately and atomically
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
806
808
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
        dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
817
819
    def _writedirstate(self, tr, st, now=None):
        """Serialize the dirstate into open file object `st`.

        `now` is the filesystem's notion of the current time, used to detect
        entries with ambiguous mtimes; when None it is derived from the
        temporary file being written.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None

        if now is None:
            # use the modification time of the newly created temporary file as the
            # filesystem's notion of 'now'
            now = timestamp.mtime_of(util.fstat(st))

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    # trust our estimate that the end is near now
                    now = timestamp.timestamp((end, 0))
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = timestamp.zero()
        self._dirty = False
854
856
855 def _dirignore(self, f):
857 def _dirignore(self, f):
856 if self._ignore(f):
858 if self._ignore(f):
857 return True
859 return True
858 for p in pathutil.finddirs(f):
860 for p in pathutil.finddirs(f):
859 if self._ignore(p):
861 if self._ignore(p):
860 return True
862 return True
861 return False
863 return False
862
864
863 def _ignorefiles(self):
865 def _ignorefiles(self):
864 files = []
866 files = []
865 if os.path.exists(self._join(b'.hgignore')):
867 if os.path.exists(self._join(b'.hgignore')):
866 files.append(self._join(b'.hgignore'))
868 files.append(self._join(b'.hgignore'))
867 for name, path in self._ui.configitems(b"ui"):
869 for name, path in self._ui.configitems(b"ui"):
868 if name == b'ignore' or name.startswith(b'ignore.'):
870 if name == b'ignore' or name.startswith(b'ignore.'):
869 # we need to use os.path.join here rather than self._join
871 # we need to use os.path.join here rather than self._join
870 # because path is arbitrary and user-specified
872 # because path is arbitrary and user-specified
871 files.append(os.path.join(self._rootdir, util.expandpath(path)))
873 files.append(os.path.join(self._rootdir, util.expandpath(path)))
872 return files
874 return files
873
875
    def _ignorefileandline(self, f):
        """Return (ignorefile, lineno, line) for the first rule matching `f`.

        Walks every ignore file, following `subinclude` directives
        breadth-first while avoiding revisits; returns (None, -1, b"") when
        no pattern matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the included file instead of matching directly
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
895
897
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for an unsupported file type
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # hoist frequently-used lookups into locals for the loops below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop any explicit file that lives inside a subrepo; the subrepo
        # entry itself stands in for them
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group the found files by their case-normalized form
            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1030
1032
1031 def walk(self, match, subrepos, unknown, ignored, full=True):
1033 def walk(self, match, subrepos, unknown, ignored, full=True):
1032 """
1034 """
1033 Walk recursively through the directory tree, finding all files
1035 Walk recursively through the directory tree, finding all files
1034 matched by match.
1036 matched by match.
1035
1037
1036 If full is False, maybe skip some known-clean files.
1038 If full is False, maybe skip some known-clean files.
1037
1039
1038 Return a dict mapping filename to stat-like object (either
1040 Return a dict mapping filename to stat-like object (either
1039 mercurial.osutil.stat instance or return value of os.stat()).
1041 mercurial.osutil.stat instance or return value of os.stat()).
1040
1042
1041 """
1043 """
1042 # full is a flag that extensions that hook into walk can use -- this
1044 # full is a flag that extensions that hook into walk can use -- this
1043 # implementation doesn't use it at all. This satisfies the contract
1045 # implementation doesn't use it at all. This satisfies the contract
1044 # because we only guarantee a "maybe".
1046 # because we only guarantee a "maybe".
1045
1047
1046 if ignored:
1048 if ignored:
1047 ignore = util.never
1049 ignore = util.never
1048 dirignore = util.never
1050 dirignore = util.never
1049 elif unknown:
1051 elif unknown:
1050 ignore = self._ignore
1052 ignore = self._ignore
1051 dirignore = self._dirignore
1053 dirignore = self._dirignore
1052 else:
1054 else:
1053 # if not unknown and not ignored, drop dir recursion and step 2
1055 # if not unknown and not ignored, drop dir recursion and step 2
1054 ignore = util.always
1056 ignore = util.always
1055 dirignore = util.always
1057 dirignore = util.always
1056
1058
1057 matchfn = match.matchfn
1059 matchfn = match.matchfn
1058 matchalways = match.always()
1060 matchalways = match.always()
1059 matchtdir = match.traversedir
1061 matchtdir = match.traversedir
1060 dmap = self._map
1062 dmap = self._map
1061 listdir = util.listdir
1063 listdir = util.listdir
1062 lstat = os.lstat
1064 lstat = os.lstat
1063 dirkind = stat.S_IFDIR
1065 dirkind = stat.S_IFDIR
1064 regkind = stat.S_IFREG
1066 regkind = stat.S_IFREG
1065 lnkkind = stat.S_IFLNK
1067 lnkkind = stat.S_IFLNK
1066 join = self._join
1068 join = self._join
1067
1069
1068 exact = skipstep3 = False
1070 exact = skipstep3 = False
1069 if match.isexact(): # match.exact
1071 if match.isexact(): # match.exact
1070 exact = True
1072 exact = True
1071 dirignore = util.always # skip step 2
1073 dirignore = util.always # skip step 2
1072 elif match.prefix(): # match.match, no patterns
1074 elif match.prefix(): # match.match, no patterns
1073 skipstep3 = True
1075 skipstep3 = True
1074
1076
1075 if not exact and self._checkcase:
1077 if not exact and self._checkcase:
1076 normalize = self._normalize
1078 normalize = self._normalize
1077 normalizefile = self._normalizefile
1079 normalizefile = self._normalizefile
1078 skipstep3 = False
1080 skipstep3 = False
1079 else:
1081 else:
1080 normalize = self._normalize
1082 normalize = self._normalize
1081 normalizefile = None
1083 normalizefile = None
1082
1084
1083 # step 1: find all explicit files
1085 # step 1: find all explicit files
1084 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1086 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1085 if matchtdir:
1087 if matchtdir:
1086 for d in work:
1088 for d in work:
1087 matchtdir(d[0])
1089 matchtdir(d[0])
1088 for d in dirsnotfound:
1090 for d in dirsnotfound:
1089 matchtdir(d)
1091 matchtdir(d)
1090
1092
1091 skipstep3 = skipstep3 and not (work or dirsnotfound)
1093 skipstep3 = skipstep3 and not (work or dirsnotfound)
1092 work = [d for d in work if not dirignore(d[0])]
1094 work = [d for d in work if not dirignore(d[0])]
1093
1095
1094 # step 2: visit subdirectories
1096 # step 2: visit subdirectories
1095 def traverse(work, alreadynormed):
1097 def traverse(work, alreadynormed):
1096 wadd = work.append
1098 wadd = work.append
1097 while work:
1099 while work:
1098 tracing.counter('dirstate.walk work', len(work))
1100 tracing.counter('dirstate.walk work', len(work))
1099 nd = work.pop()
1101 nd = work.pop()
1100 visitentries = match.visitchildrenset(nd)
1102 visitentries = match.visitchildrenset(nd)
1101 if not visitentries:
1103 if not visitentries:
1102 continue
1104 continue
1103 if visitentries == b'this' or visitentries == b'all':
1105 if visitentries == b'this' or visitentries == b'all':
1104 visitentries = None
1106 visitentries = None
1105 skip = None
1107 skip = None
1106 if nd != b'':
1108 if nd != b'':
1107 skip = b'.hg'
1109 skip = b'.hg'
1108 try:
1110 try:
1109 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1111 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1110 entries = listdir(join(nd), stat=True, skip=skip)
1112 entries = listdir(join(nd), stat=True, skip=skip)
1111 except OSError as inst:
1113 except OSError as inst:
1112 if inst.errno in (errno.EACCES, errno.ENOENT):
1114 if inst.errno in (errno.EACCES, errno.ENOENT):
1113 match.bad(
1115 match.bad(
1114 self.pathto(nd), encoding.strtolocal(inst.strerror)
1116 self.pathto(nd), encoding.strtolocal(inst.strerror)
1115 )
1117 )
1116 continue
1118 continue
1117 raise
1119 raise
1118 for f, kind, st in entries:
1120 for f, kind, st in entries:
1119 # Some matchers may return files in the visitentries set,
1121 # Some matchers may return files in the visitentries set,
1120 # instead of 'this', if the matcher explicitly mentions them
1122 # instead of 'this', if the matcher explicitly mentions them
1121 # and is not an exactmatcher. This is acceptable; we do not
1123 # and is not an exactmatcher. This is acceptable; we do not
1122 # make any hard assumptions about file-or-directory below
1124 # make any hard assumptions about file-or-directory below
1123 # based on the presence of `f` in visitentries. If
1125 # based on the presence of `f` in visitentries. If
1124 # visitchildrenset returned a set, we can always skip the
1126 # visitchildrenset returned a set, we can always skip the
1125 # entries *not* in the set it provided regardless of whether
1127 # entries *not* in the set it provided regardless of whether
1126 # they're actually a file or a directory.
1128 # they're actually a file or a directory.
1127 if visitentries and f not in visitentries:
1129 if visitentries and f not in visitentries:
1128 continue
1130 continue
1129 if normalizefile:
1131 if normalizefile:
1130 # even though f might be a directory, we're only
1132 # even though f might be a directory, we're only
1131 # interested in comparing it to files currently in the
1133 # interested in comparing it to files currently in the
1132 # dmap -- therefore normalizefile is enough
1134 # dmap -- therefore normalizefile is enough
1133 nf = normalizefile(
1135 nf = normalizefile(
1134 nd and (nd + b"/" + f) or f, True, True
1136 nd and (nd + b"/" + f) or f, True, True
1135 )
1137 )
1136 else:
1138 else:
1137 nf = nd and (nd + b"/" + f) or f
1139 nf = nd and (nd + b"/" + f) or f
1138 if nf not in results:
1140 if nf not in results:
1139 if kind == dirkind:
1141 if kind == dirkind:
1140 if not ignore(nf):
1142 if not ignore(nf):
1141 if matchtdir:
1143 if matchtdir:
1142 matchtdir(nf)
1144 matchtdir(nf)
1143 wadd(nf)
1145 wadd(nf)
1144 if nf in dmap and (matchalways or matchfn(nf)):
1146 if nf in dmap and (matchalways or matchfn(nf)):
1145 results[nf] = None
1147 results[nf] = None
1146 elif kind == regkind or kind == lnkkind:
1148 elif kind == regkind or kind == lnkkind:
1147 if nf in dmap:
1149 if nf in dmap:
1148 if matchalways or matchfn(nf):
1150 if matchalways or matchfn(nf):
1149 results[nf] = st
1151 results[nf] = st
1150 elif (matchalways or matchfn(nf)) and not ignore(
1152 elif (matchalways or matchfn(nf)) and not ignore(
1151 nf
1153 nf
1152 ):
1154 ):
1153 # unknown file -- normalize if necessary
1155 # unknown file -- normalize if necessary
1154 if not alreadynormed:
1156 if not alreadynormed:
1155 nf = normalize(nf, False, True)
1157 nf = normalize(nf, False, True)
1156 results[nf] = st
1158 results[nf] = st
1157 elif nf in dmap and (matchalways or matchfn(nf)):
1159 elif nf in dmap and (matchalways or matchfn(nf)):
1158 results[nf] = None
1160 results[nf] = None
1159
1161
1160 for nd, d in work:
1162 for nd, d in work:
1161 # alreadynormed means that processwork doesn't have to do any
1163 # alreadynormed means that processwork doesn't have to do any
1162 # expensive directory normalization
1164 # expensive directory normalization
1163 alreadynormed = not normalize or nd == d
1165 alreadynormed = not normalize or nd == d
1164 traverse([d], alreadynormed)
1166 traverse([d], alreadynormed)
1165
1167
1166 for s in subrepos:
1168 for s in subrepos:
1167 del results[s]
1169 del results[s]
1168 del results[b'.hg']
1170 del results[b'.hg']
1169
1171
1170 # step 3: visit remaining files from dmap
1172 # step 3: visit remaining files from dmap
1171 if not skipstep3 and not exact:
1173 if not skipstep3 and not exact:
1172 # If a dmap file is not in results yet, it was either
1174 # If a dmap file is not in results yet, it was either
1173 # a) not matching matchfn b) ignored, c) missing, or d) under a
1175 # a) not matching matchfn b) ignored, c) missing, or d) under a
1174 # symlink directory.
1176 # symlink directory.
1175 if not results and matchalways:
1177 if not results and matchalways:
1176 visit = [f for f in dmap]
1178 visit = [f for f in dmap]
1177 else:
1179 else:
1178 visit = [f for f in dmap if f not in results and matchfn(f)]
1180 visit = [f for f in dmap if f not in results and matchfn(f)]
1179 visit.sort()
1181 visit.sort()
1180
1182
1181 if unknown:
1183 if unknown:
1182 # unknown == True means we walked all dirs under the roots
1184 # unknown == True means we walked all dirs under the roots
1183 # that wasn't ignored, and everything that matched was stat'ed
1185 # that wasn't ignored, and everything that matched was stat'ed
1184 # and is already in results.
1186 # and is already in results.
1185 # The rest must thus be ignored or under a symlink.
1187 # The rest must thus be ignored or under a symlink.
1186 audit_path = pathutil.pathauditor(self._root, cached=True)
1188 audit_path = pathutil.pathauditor(self._root, cached=True)
1187
1189
1188 for nf in iter(visit):
1190 for nf in iter(visit):
1189 # If a stat for the same file was already added with a
1191 # If a stat for the same file was already added with a
1190 # different case, don't add one for this, since that would
1192 # different case, don't add one for this, since that would
1191 # make it appear as if the file exists under both names
1193 # make it appear as if the file exists under both names
1192 # on disk.
1194 # on disk.
1193 if (
1195 if (
1194 normalizefile
1196 normalizefile
1195 and normalizefile(nf, True, True) in results
1197 and normalizefile(nf, True, True) in results
1196 ):
1198 ):
1197 results[nf] = None
1199 results[nf] = None
1198 # Report ignored items in the dmap as long as they are not
1200 # Report ignored items in the dmap as long as they are not
1199 # under a symlink directory.
1201 # under a symlink directory.
1200 elif audit_path.check(nf):
1202 elif audit_path.check(nf):
1201 try:
1203 try:
1202 results[nf] = lstat(join(nf))
1204 results[nf] = lstat(join(nf))
1203 # file was just ignored, no links, and exists
1205 # file was just ignored, no links, and exists
1204 except OSError:
1206 except OSError:
1205 # file doesn't exist
1207 # file doesn't exist
1206 results[nf] = None
1208 results[nf] = None
1207 else:
1209 else:
1208 # It's either missing or under a symlink directory
1210 # It's either missing or under a symlink directory
1209 # which we in this case report as missing
1211 # which we in this case report as missing
1210 results[nf] = None
1212 results[nf] = None
1211 else:
1213 else:
1212 # We may not have walked the full directory tree above,
1214 # We may not have walked the full directory tree above,
1213 # so stat and check everything we missed.
1215 # so stat and check everything we missed.
1214 iv = iter(visit)
1216 iv = iter(visit)
1215 for st in util.statfiles([join(i) for i in visit]):
1217 for st in util.statfiles([join(i) for i in visit]):
1216 results[next(iv)] = st
1218 results[next(iv)] = st
1217 return results
1219 return results
1218
1220
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status via the Rust implementation and return
        ``(lookup, status)``.

        Mirrors the return contract of ``status()``: ``lookup`` is the list
        of files whose state is uncertain and must be re-read, ``status`` is
        a ``scmutil.status`` tuple.  May raise ``rustmod.FallbackError``
        (handled by the caller, which then falls back to the pure-Python
        path).
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            # A single Rayon thread effectively disables parallelism.
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # The Rust side may have mutated the dirstate map; remember that it
        # needs to be written out.
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax): an ignore file contained a line
                    # with invalid pattern syntax.
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # A bare path: the ignore file itself was unreadable.
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        # Report explicitly-requested files that could not be stat'ed.
        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1297
1299
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # The `ignored`/`clean`/`unknown` parameters are booleans selecting
        # which result lists to populate; rebind them so the same names can
        # be reused below for the result lists themselves.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # Decide whether the Rust fast path can service this request; each
        # branch below disables it for a case the Rust code does not handle.
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # Rust declined the request at runtime; fall through to the
                # pure-Python implementation below.
                pass

        def noop(f):
            # Sink used in place of list.append for result lists the caller
            # did not ask for.
            pass

        # Bind hot-loop lookups to locals.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # File on disk but not in the dirstate: ignored or unknown.
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            t = dget(fn)
            mode = t.mode
            size = t.size

            # Classify the tracked file by comparing dirstate entry vs stat.
            if not st and t.tracked:
                dadd(fn)
            elif t.p2_info:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                    # Same size/mode but mtime differs: needs content read.
                    ladd(fn)
                elif timestamp.mtime_of(st) == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1426
1428
1427 def matches(self, match):
1429 def matches(self, match):
1428 """
1430 """
1429 return files in the dirstate (in whatever state) filtered by match
1431 return files in the dirstate (in whatever state) filtered by match
1430 """
1432 """
1431 dmap = self._map
1433 dmap = self._map
1432 if rustmod is not None:
1434 if rustmod is not None:
1433 dmap = self._map._map
1435 dmap = self._map._map
1434
1436
1435 if match.always():
1437 if match.always():
1436 return dmap.keys()
1438 return dmap.keys()
1437 files = match.files()
1439 files = match.files()
1438 if match.isexact():
1440 if match.isexact():
1439 # fast path -- filter the other way around, since typically files is
1441 # fast path -- filter the other way around, since typically files is
1440 # much smaller than dmap
1442 # much smaller than dmap
1441 return [f for f in files if f in dmap]
1443 return [f for f in files if f in dmap]
1442 if match.prefix() and all(fn in dmap for fn in files):
1444 if match.prefix() and all(fn in dmap for fn in files):
1443 # fast path -- all the values are known to be files, so just return
1445 # fast path -- all the values are known to be files, so just return
1444 # that
1446 # that
1445 return list(files)
1447 return list(files)
1446 return [f for f in dmap if match(f)]
1448 return [f for f in dmap if match(f)]
1447
1449
1448 def _actualfilename(self, tr):
1450 def _actualfilename(self, tr):
1449 if tr:
1451 if tr:
1450 return self._pendingfilename
1452 return self._pendingfilename
1451 else:
1453 else:
1452 return self._filename
1454 return self._filename
1453
1455
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` is the running transaction, if any; ``backupname`` is the
        name (relative to the opener) the backup is hardlinked/copied to.
        '''
        filename = self._actualfilename(tr)
        # Backing the file up onto itself would lose data below.
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1492
1494
    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file

        Counterpart of ``savebackup``: moves (or removes, when they are the
        same file) ``backupname`` back over the live dirstate file.
        '''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            # savebackup() created a hardlink, so the backup IS the current
            # file; just drop the extra name.
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)
1504
1506
    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        # `tr` is unused here; kept for interface symmetry with
        # savebackup() and restorebackup().
        self._opener.unlink(backupname)
1508
1510
1509 def verify(self, m1, m2):
1511 def verify(self, m1, m2):
1510 """check the dirstate content again the parent manifest and yield errors"""
1512 """check the dirstate content again the parent manifest and yield errors"""
1511 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1513 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1512 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1514 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1513 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1515 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1514 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1516 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1515 for f, entry in self.items():
1517 for f, entry in self.items():
1516 state = entry.state
1518 state = entry.state
1517 if state in b"nr" and f not in m1:
1519 if state in b"nr" and f not in m1:
1518 yield (missing_from_p1, f, state)
1520 yield (missing_from_p1, f, state)
1519 if state in b"a" and f in m1:
1521 if state in b"a" and f in m1:
1520 yield (unexpected_in_p1, f, state)
1522 yield (unexpected_in_p1, f, state)
1521 if state in b"m" and f not in m1 and f not in m2:
1523 if state in b"m" and f not in m1 and f not in m2:
1522 yield (missing_from_ps, f, state)
1524 yield (missing_from_ps, f, state)
1523 for f in m1:
1525 for f in m1:
1524 state = self.get_entry(f).state
1526 state = self.get_entry(f).state
1525 if state not in b"nrm":
1527 if state not in b"nrm":
1526 yield (missing_from_ds, f, state)
1528 yield (missing_from_ds, f, state)
General Comments 0
You need to be logged in to leave comments. Login now