##// END OF EJS Templates
dirstate: drop some duplicated code...
marmoute -
r48957:180e8fa3 default
parent child Browse files
Show More
@@ -1,1526 +1,1517 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = dirstatemap.DirstateItem
48 DirstateItem = dirstatemap.DirstateItem
49
49
50
50
class repocache(filecache):
    """A filecache whose entries name files living under ``.hg/``."""

    def join(self, obj, fname):
        # Resolve fname through the repository's .hg opener.
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """A filecache whose entries name files at the repository root."""

    def join(self, obj, fname):
        # Resolve fname relative to the working-directory root.
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator: only allow *func* inside a parentchange context.

    Raises error.ProgrammingError when no parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator: forbid running *func* inside a parentchange context.

    Raises error.ProgrammingError when a parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._mapcls = dirstatemap.dirstatemap
133 self._mapcls = dirstatemap.dirstatemap
134 # Access and cache cwd early, so we don't access it for the first time
134 # Access and cache cwd early, so we don't access it for the first time
135 # after a working-copy update caused it to not exist (accessing it then
135 # after a working-copy update caused it to not exist (accessing it then
136 # raises an exception).
136 # raises an exception).
137 self._cwd
137 self._cwd
138
138
139 def prefetch_parents(self):
139 def prefetch_parents(self):
140 """make sure the parents are loaded
140 """make sure the parents are loaded
141
141
142 Used to avoid a race condition.
142 Used to avoid a race condition.
143 """
143 """
144 self._pl
144 self._pl
145
145
146 @contextlib.contextmanager
146 @contextlib.contextmanager
147 def parentchange(self):
147 def parentchange(self):
148 """Context manager for handling dirstate parents.
148 """Context manager for handling dirstate parents.
149
149
150 If an exception occurs in the scope of the context manager,
150 If an exception occurs in the scope of the context manager,
151 the incoherent dirstate won't be written when wlock is
151 the incoherent dirstate won't be written when wlock is
152 released.
152 released.
153 """
153 """
154 self._parentwriters += 1
154 self._parentwriters += 1
155 yield
155 yield
156 # Typically we want the "undo" step of a context manager in a
156 # Typically we want the "undo" step of a context manager in a
157 # finally block so it happens even when an exception
157 # finally block so it happens even when an exception
158 # occurs. In this case, however, we only want to decrement
158 # occurs. In this case, however, we only want to decrement
159 # parentwriters if the code in the with statement exits
159 # parentwriters if the code in the with statement exits
160 # normally, so we don't have a try/finally here on purpose.
160 # normally, so we don't have a try/finally here on purpose.
161 self._parentwriters -= 1
161 self._parentwriters -= 1
162
162
163 def pendingparentchange(self):
163 def pendingparentchange(self):
164 """Returns true if the dirstate is in the middle of a set of changes
164 """Returns true if the dirstate is in the middle of a set of changes
165 that modify the dirstate parent.
165 that modify the dirstate parent.
166 """
166 """
167 return self._parentwriters > 0
167 return self._parentwriters > 0
168
168
169 @propertycache
169 @propertycache
170 def _map(self):
170 def _map(self):
171 """Return the dirstate contents (see documentation for dirstatemap)."""
171 """Return the dirstate contents (see documentation for dirstatemap)."""
172 self._map = self._mapcls(
172 self._map = self._mapcls(
173 self._ui,
173 self._ui,
174 self._opener,
174 self._opener,
175 self._root,
175 self._root,
176 self._nodeconstants,
176 self._nodeconstants,
177 self._use_dirstate_v2,
177 self._use_dirstate_v2,
178 )
178 )
179 return self._map
179 return self._map
180
180
181 @property
181 @property
182 def _sparsematcher(self):
182 def _sparsematcher(self):
183 """The matcher for the sparse checkout.
183 """The matcher for the sparse checkout.
184
184
185 The working directory may not include every file from a manifest. The
185 The working directory may not include every file from a manifest. The
186 matcher obtained by this property will match a path if it is to be
186 matcher obtained by this property will match a path if it is to be
187 included in the working directory.
187 included in the working directory.
188 """
188 """
189 # TODO there is potential to cache this property. For now, the matcher
189 # TODO there is potential to cache this property. For now, the matcher
190 # is resolved on every access. (But the called function does use a
190 # is resolved on every access. (But the called function does use a
191 # cache to keep the lookup fast.)
191 # cache to keep the lookup fast.)
192 return self._sparsematchfn()
192 return self._sparsematchfn()
193
193
194 @repocache(b'branch')
194 @repocache(b'branch')
195 def _branch(self):
195 def _branch(self):
196 try:
196 try:
197 return self._opener.read(b"branch").strip() or b"default"
197 return self._opener.read(b"branch").strip() or b"default"
198 except IOError as inst:
198 except IOError as inst:
199 if inst.errno != errno.ENOENT:
199 if inst.errno != errno.ENOENT:
200 raise
200 raise
201 return b"default"
201 return b"default"
202
202
203 @property
203 @property
204 def _pl(self):
204 def _pl(self):
205 return self._map.parents()
205 return self._map.parents()
206
206
207 def hasdir(self, d):
207 def hasdir(self, d):
208 return self._map.hastrackeddir(d)
208 return self._map.hastrackeddir(d)
209
209
210 @rootcache(b'.hgignore')
210 @rootcache(b'.hgignore')
211 def _ignore(self):
211 def _ignore(self):
212 files = self._ignorefiles()
212 files = self._ignorefiles()
213 if not files:
213 if not files:
214 return matchmod.never()
214 return matchmod.never()
215
215
216 pats = [b'include:%s' % f for f in files]
216 pats = [b'include:%s' % f for f in files]
217 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
217 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218
218
219 @propertycache
219 @propertycache
220 def _slash(self):
220 def _slash(self):
221 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
221 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222
222
223 @propertycache
223 @propertycache
224 def _checklink(self):
224 def _checklink(self):
225 return util.checklink(self._root)
225 return util.checklink(self._root)
226
226
227 @propertycache
227 @propertycache
228 def _checkexec(self):
228 def _checkexec(self):
229 return bool(util.checkexec(self._root))
229 return bool(util.checkexec(self._root))
230
230
231 @propertycache
231 @propertycache
232 def _checkcase(self):
232 def _checkcase(self):
233 return not util.fscasesensitive(self._join(b'.hg'))
233 return not util.fscasesensitive(self._join(b'.hg'))
234
234
235 def _join(self, f):
235 def _join(self, f):
236 # much faster than os.path.join()
236 # much faster than os.path.join()
237 # it's safe because f is always a relative path
237 # it's safe because f is always a relative path
238 return self._rootdir + f
238 return self._rootdir + f
239
239
240 def flagfunc(self, buildfallback):
240 def flagfunc(self, buildfallback):
241 if self._checklink and self._checkexec:
241 if self._checklink and self._checkexec:
242
242
243 def f(x):
243 def f(x):
244 try:
244 try:
245 st = os.lstat(self._join(x))
245 st = os.lstat(self._join(x))
246 if util.statislink(st):
246 if util.statislink(st):
247 return b'l'
247 return b'l'
248 if util.statisexec(st):
248 if util.statisexec(st):
249 return b'x'
249 return b'x'
250 except OSError:
250 except OSError:
251 pass
251 pass
252 return b''
252 return b''
253
253
254 return f
254 return f
255
255
256 fallback = buildfallback()
256 fallback = buildfallback()
257 if self._checklink:
257 if self._checklink:
258
258
259 def f(x):
259 def f(x):
260 if os.path.islink(self._join(x)):
260 if os.path.islink(self._join(x)):
261 return b'l'
261 return b'l'
262 if b'x' in fallback(x):
262 if b'x' in fallback(x):
263 return b'x'
263 return b'x'
264 return b''
264 return b''
265
265
266 return f
266 return f
267 if self._checkexec:
267 if self._checkexec:
268
268
269 def f(x):
269 def f(x):
270 if b'l' in fallback(x):
270 if b'l' in fallback(x):
271 return b'l'
271 return b'l'
272 if util.isexec(self._join(x)):
272 if util.isexec(self._join(x)):
273 return b'x'
273 return b'x'
274 return b''
274 return b''
275
275
276 return f
276 return f
277 else:
277 else:
278 return fallback
278 return fallback
279
279
280 @propertycache
280 @propertycache
281 def _cwd(self):
281 def _cwd(self):
282 # internal config: ui.forcecwd
282 # internal config: ui.forcecwd
283 forcecwd = self._ui.config(b'ui', b'forcecwd')
283 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 if forcecwd:
284 if forcecwd:
285 return forcecwd
285 return forcecwd
286 return encoding.getcwd()
286 return encoding.getcwd()
287
287
288 def getcwd(self):
288 def getcwd(self):
289 """Return the path from which a canonical path is calculated.
289 """Return the path from which a canonical path is calculated.
290
290
291 This path should be used to resolve file patterns or to convert
291 This path should be used to resolve file patterns or to convert
292 canonical paths back to file paths for display. It shouldn't be
292 canonical paths back to file paths for display. It shouldn't be
293 used to get real file paths. Use vfs functions instead.
293 used to get real file paths. Use vfs functions instead.
294 """
294 """
295 cwd = self._cwd
295 cwd = self._cwd
296 if cwd == self._root:
296 if cwd == self._root:
297 return b''
297 return b''
298 # self._root ends with a path separator if self._root is '/' or 'C:\'
298 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 rootsep = self._root
299 rootsep = self._root
300 if not util.endswithsep(rootsep):
300 if not util.endswithsep(rootsep):
301 rootsep += pycompat.ossep
301 rootsep += pycompat.ossep
302 if cwd.startswith(rootsep):
302 if cwd.startswith(rootsep):
303 return cwd[len(rootsep) :]
303 return cwd[len(rootsep) :]
304 else:
304 else:
305 # we're outside the repo. return an absolute path.
305 # we're outside the repo. return an absolute path.
306 return cwd
306 return cwd
307
307
308 def pathto(self, f, cwd=None):
308 def pathto(self, f, cwd=None):
309 if cwd is None:
309 if cwd is None:
310 cwd = self.getcwd()
310 cwd = self.getcwd()
311 path = util.pathto(self._root, cwd, f)
311 path = util.pathto(self._root, cwd, f)
312 if self._slash:
312 if self._slash:
313 return util.pconvert(path)
313 return util.pconvert(path)
314 return path
314 return path
315
315
316 def __getitem__(self, key):
316 def __getitem__(self, key):
317 """Return the current state of key (a filename) in the dirstate.
317 """Return the current state of key (a filename) in the dirstate.
318
318
319 States are:
319 States are:
320 n normal
320 n normal
321 m needs merging
321 m needs merging
322 r marked for removal
322 r marked for removal
323 a marked for addition
323 a marked for addition
324 ? not tracked
324 ? not tracked
325
325
326 XXX The "state" is a bit obscure to be in the "public" API. we should
326 XXX The "state" is a bit obscure to be in the "public" API. we should
327 consider migrating all user of this to going through the dirstate entry
327 consider migrating all user of this to going through the dirstate entry
328 instead.
328 instead.
329 """
329 """
330 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
330 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
331 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
331 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
332 entry = self._map.get(key)
332 entry = self._map.get(key)
333 if entry is not None:
333 if entry is not None:
334 return entry.state
334 return entry.state
335 return b'?'
335 return b'?'
336
336
337 def get_entry(self, path):
337 def get_entry(self, path):
338 """return a DirstateItem for the associated path"""
338 """return a DirstateItem for the associated path"""
339 entry = self._map.get(path)
339 entry = self._map.get(path)
340 if entry is None:
340 if entry is None:
341 return DirstateItem()
341 return DirstateItem()
342 return entry
342 return entry
343
343
344 def __contains__(self, key):
344 def __contains__(self, key):
345 return key in self._map
345 return key in self._map
346
346
347 def __iter__(self):
347 def __iter__(self):
348 return iter(sorted(self._map))
348 return iter(sorted(self._map))
349
349
350 def items(self):
350 def items(self):
351 return pycompat.iteritems(self._map)
351 return pycompat.iteritems(self._map)
352
352
353 iteritems = items
353 iteritems = items
354
354
355 def parents(self):
355 def parents(self):
356 return [self._validate(p) for p in self._pl]
356 return [self._validate(p) for p in self._pl]
357
357
358 def p1(self):
358 def p1(self):
359 return self._validate(self._pl[0])
359 return self._validate(self._pl[0])
360
360
361 def p2(self):
361 def p2(self):
362 return self._validate(self._pl[1])
362 return self._validate(self._pl[1])
363
363
364 @property
364 @property
365 def in_merge(self):
365 def in_merge(self):
366 """True if a merge is in progress"""
366 """True if a merge is in progress"""
367 return self._pl[1] != self._nodeconstants.nullid
367 return self._pl[1] != self._nodeconstants.nullid
368
368
369 def branch(self):
369 def branch(self):
370 return encoding.tolocal(self._branch)
370 return encoding.tolocal(self._branch)
371
371
372 def setparents(self, p1, p2=None):
372 def setparents(self, p1, p2=None):
373 """Set dirstate parents to p1 and p2.
373 """Set dirstate parents to p1 and p2.
374
374
375 When moving from two parents to one, "merged" entries a
375 When moving from two parents to one, "merged" entries a
376 adjusted to normal and previous copy records discarded and
376 adjusted to normal and previous copy records discarded and
377 returned by the call.
377 returned by the call.
378
378
379 See localrepo.setparents()
379 See localrepo.setparents()
380 """
380 """
381 if p2 is None:
381 if p2 is None:
382 p2 = self._nodeconstants.nullid
382 p2 = self._nodeconstants.nullid
383 if self._parentwriters == 0:
383 if self._parentwriters == 0:
384 raise ValueError(
384 raise ValueError(
385 b"cannot set dirstate parent outside of "
385 b"cannot set dirstate parent outside of "
386 b"dirstate.parentchange context manager"
386 b"dirstate.parentchange context manager"
387 )
387 )
388
388
389 self._dirty = True
389 self._dirty = True
390 oldp2 = self._pl[1]
390 oldp2 = self._pl[1]
391 if self._origpl is None:
391 if self._origpl is None:
392 self._origpl = self._pl
392 self._origpl = self._pl
393 nullid = self._nodeconstants.nullid
393 nullid = self._nodeconstants.nullid
394 # True if we need to fold p2 related state back to a linear case
394 # True if we need to fold p2 related state back to a linear case
395 fold_p2 = oldp2 != nullid and p2 == nullid
395 fold_p2 = oldp2 != nullid and p2 == nullid
396 return self._map.setparents(p1, p2, fold_p2=fold_p2)
396 return self._map.setparents(p1, p2, fold_p2=fold_p2)
397
397
398 def setbranch(self, branch):
398 def setbranch(self, branch):
399 self.__class__._branch.set(self, encoding.fromlocal(branch))
399 self.__class__._branch.set(self, encoding.fromlocal(branch))
400 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
400 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
401 try:
401 try:
402 f.write(self._branch + b'\n')
402 f.write(self._branch + b'\n')
403 f.close()
403 f.close()
404
404
405 # make sure filecache has the correct stat info for _branch after
405 # make sure filecache has the correct stat info for _branch after
406 # replacing the underlying file
406 # replacing the underlying file
407 ce = self._filecache[b'_branch']
407 ce = self._filecache[b'_branch']
408 if ce:
408 if ce:
409 ce.refresh()
409 ce.refresh()
410 except: # re-raises
410 except: # re-raises
411 f.discard()
411 f.discard()
412 raise
412 raise
413
413
414 def invalidate(self):
414 def invalidate(self):
415 """Causes the next access to reread the dirstate.
415 """Causes the next access to reread the dirstate.
416
416
417 This is different from localrepo.invalidatedirstate() because it always
417 This is different from localrepo.invalidatedirstate() because it always
418 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
418 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
419 check whether the dirstate has changed before rereading it."""
419 check whether the dirstate has changed before rereading it."""
420
420
421 for a in ("_map", "_branch", "_ignore"):
421 for a in ("_map", "_branch", "_ignore"):
422 if a in self.__dict__:
422 if a in self.__dict__:
423 delattr(self, a)
423 delattr(self, a)
424 self._lastnormaltime = 0
424 self._lastnormaltime = 0
425 self._dirty = False
425 self._dirty = False
426 self._parentwriters = 0
426 self._parentwriters = 0
427 self._origpl = None
427 self._origpl = None
428
428
429 def copy(self, source, dest):
429 def copy(self, source, dest):
430 """Mark dest as a copy of source. Unmark dest if source is None."""
430 """Mark dest as a copy of source. Unmark dest if source is None."""
431 if source == dest:
431 if source == dest:
432 return
432 return
433 self._dirty = True
433 self._dirty = True
434 if source is not None:
434 if source is not None:
435 self._map.copymap[dest] = source
435 self._map.copymap[dest] = source
436 else:
436 else:
437 self._map.copymap.pop(dest, None)
437 self._map.copymap.pop(dest, None)
438
438
439 def copied(self, file):
439 def copied(self, file):
440 return self._map.copymap.get(file, None)
440 return self._map.copymap.get(file, None)
441
441
442 def copies(self):
442 def copies(self):
443 return self._map.copymap
443 return self._map.copymap
444
444
445 @requires_no_parents_change
445 @requires_no_parents_change
446 def set_tracked(self, filename):
446 def set_tracked(self, filename):
447 """a "public" method for generic code to mark a file as tracked
447 """a "public" method for generic code to mark a file as tracked
448
448
449 This function is to be called outside of "update/merge" case. For
449 This function is to be called outside of "update/merge" case. For
450 example by a command like `hg add X`.
450 example by a command like `hg add X`.
451
451
452 return True the file was previously untracked, False otherwise.
452 return True the file was previously untracked, False otherwise.
453 """
453 """
454 self._dirty = True
454 self._dirty = True
455 entry = self._map.get(filename)
455 entry = self._map.get(filename)
456 if entry is None or not entry.tracked:
456 if entry is None or not entry.tracked:
457 self._check_new_tracked_filename(filename)
457 self._check_new_tracked_filename(filename)
458 return self._map.set_tracked(filename)
458 return self._map.set_tracked(filename)
459
459
460 @requires_no_parents_change
460 @requires_no_parents_change
461 def set_untracked(self, filename):
461 def set_untracked(self, filename):
462 """a "public" method for generic code to mark a file as untracked
462 """a "public" method for generic code to mark a file as untracked
463
463
464 This function is to be called outside of "update/merge" case. For
464 This function is to be called outside of "update/merge" case. For
465 example by a command like `hg remove X`.
465 example by a command like `hg remove X`.
466
466
467 return True the file was previously tracked, False otherwise.
467 return True the file was previously tracked, False otherwise.
468 """
468 """
469 ret = self._map.set_untracked(filename)
469 ret = self._map.set_untracked(filename)
470 if ret:
470 if ret:
471 self._dirty = True
471 self._dirty = True
472 return ret
472 return ret
473
473
474 @requires_no_parents_change
474 @requires_no_parents_change
475 def set_clean(self, filename, parentfiledata=None):
475 def set_clean(self, filename, parentfiledata=None):
476 """record that the current state of the file on disk is known to be clean"""
476 """record that the current state of the file on disk is known to be clean"""
477 self._dirty = True
477 self._dirty = True
478 if parentfiledata:
478 if parentfiledata:
479 (mode, size, mtime) = parentfiledata
479 (mode, size, mtime) = parentfiledata
480 else:
480 else:
481 (mode, size, mtime) = self._get_filedata(filename)
481 (mode, size, mtime) = self._get_filedata(filename)
482 if not self._map[filename].tracked:
482 if not self._map[filename].tracked:
483 self._check_new_tracked_filename(filename)
483 self._check_new_tracked_filename(filename)
484 self._map.set_clean(filename, mode, size, mtime)
484 self._map.set_clean(filename, mode, size, mtime)
485 if mtime > self._lastnormaltime:
485 if mtime > self._lastnormaltime:
486 # Remember the most recent modification timeslot for status(),
486 # Remember the most recent modification timeslot for status(),
487 # to make sure we won't miss future size-preserving file content
487 # to make sure we won't miss future size-preserving file content
488 # modifications that happen within the same timeslot.
488 # modifications that happen within the same timeslot.
489 self._lastnormaltime = mtime
489 self._lastnormaltime = mtime
490
490
491 @requires_no_parents_change
491 @requires_no_parents_change
492 def set_possibly_dirty(self, filename):
492 def set_possibly_dirty(self, filename):
493 """record that the current state of the file on disk is unknown"""
493 """record that the current state of the file on disk is unknown"""
494 self._dirty = True
494 self._dirty = True
495 self._map.set_possibly_dirty(filename)
495 self._map.set_possibly_dirty(filename)
496
496
497 @requires_parents_change
497 @requires_parents_change
498 def update_file_p1(
498 def update_file_p1(
499 self,
499 self,
500 filename,
500 filename,
501 p1_tracked,
501 p1_tracked,
502 ):
502 ):
503 """Set a file as tracked in the parent (or not)
503 """Set a file as tracked in the parent (or not)
504
504
505 This is to be called when adjust the dirstate to a new parent after an history
505 This is to be called when adjust the dirstate to a new parent after an history
506 rewriting operation.
506 rewriting operation.
507
507
508 It should not be called during a merge (p2 != nullid) and only within
508 It should not be called during a merge (p2 != nullid) and only within
509 a `with dirstate.parentchange():` context.
509 a `with dirstate.parentchange():` context.
510 """
510 """
511 if self.in_merge:
511 if self.in_merge:
512 msg = b'update_file_reference should not be called when merging'
512 msg = b'update_file_reference should not be called when merging'
513 raise error.ProgrammingError(msg)
513 raise error.ProgrammingError(msg)
514 entry = self._map.get(filename)
514 entry = self._map.get(filename)
515 if entry is None:
515 if entry is None:
516 wc_tracked = False
516 wc_tracked = False
517 else:
517 else:
518 wc_tracked = entry.tracked
518 wc_tracked = entry.tracked
519 if not (p1_tracked or wc_tracked):
519 if not (p1_tracked or wc_tracked):
520 # the file is no longer relevant to anyone
520 # the file is no longer relevant to anyone
521 if self._map.get(filename) is not None:
521 if self._map.get(filename) is not None:
522 self._map.reset_state(filename)
522 self._map.reset_state(filename)
523 self._dirty = True
523 self._dirty = True
524 elif (not p1_tracked) and wc_tracked:
524 elif (not p1_tracked) and wc_tracked:
525 if entry is not None and entry.added:
525 if entry is not None and entry.added:
526 return # avoid dropping copy information (maybe?)
526 return # avoid dropping copy information (maybe?)
527
527
528 parentfiledata = None
528 parentfiledata = None
529 if wc_tracked and p1_tracked:
529 if wc_tracked and p1_tracked:
530 parentfiledata = self._get_filedata(filename)
530 parentfiledata = self._get_filedata(filename)
531
531
532 self._map.reset_state(
532 self._map.reset_state(
533 filename,
533 filename,
534 wc_tracked,
534 wc_tracked,
535 p1_tracked,
535 p1_tracked,
536 # the underlying reference might have changed, we will have to
536 # the underlying reference might have changed, we will have to
537 # check it.
537 # check it.
538 has_meaningful_mtime=False,
538 has_meaningful_mtime=False,
539 parentfiledata=parentfiledata,
539 parentfiledata=parentfiledata,
540 )
540 )
541 if (
541 if (
542 parentfiledata is not None
542 parentfiledata is not None
543 and parentfiledata[2] > self._lastnormaltime
543 and parentfiledata[2] > self._lastnormaltime
544 ):
544 ):
545 # Remember the most recent modification timeslot for status(),
545 # Remember the most recent modification timeslot for status(),
546 # to make sure we won't miss future size-preserving file content
546 # to make sure we won't miss future size-preserving file content
547 # modifications that happen within the same timeslot.
547 # modifications that happen within the same timeslot.
548 self._lastnormaltime = parentfiledata[2]
548 self._lastnormaltime = parentfiledata[2]
549
549
550 @requires_parents_change
550 @requires_parents_change
551 def update_file(
551 def update_file(
552 self,
552 self,
553 filename,
553 filename,
554 wc_tracked,
554 wc_tracked,
555 p1_tracked,
555 p1_tracked,
556 p2_info=False,
556 p2_info=False,
557 possibly_dirty=False,
557 possibly_dirty=False,
558 parentfiledata=None,
558 parentfiledata=None,
559 ):
559 ):
560 """update the information about a file in the dirstate
560 """update the information about a file in the dirstate
561
561
562 This is to be called when the direstates parent changes to keep track
562 This is to be called when the direstates parent changes to keep track
563 of what is the file situation in regards to the working copy and its parent.
563 of what is the file situation in regards to the working copy and its parent.
564
564
565 This function must be called within a `dirstate.parentchange` context.
565 This function must be called within a `dirstate.parentchange` context.
566
566
567 note: the API is at an early stage and we might need to adjust it
567 note: the API is at an early stage and we might need to adjust it
568 depending of what information ends up being relevant and useful to
568 depending of what information ends up being relevant and useful to
569 other processing.
569 other processing.
570 """
570 """
571
571
572 # note: I do not think we need to double check name clash here since we
572 # note: I do not think we need to double check name clash here since we
573 # are in a update/merge case that should already have taken care of
573 # are in a update/merge case that should already have taken care of
574 # this. The test agrees
574 # this. The test agrees
575
575
576 self._dirty = True
576 self._dirty = True
577
577
578 need_parent_file_data = (
578 need_parent_file_data = (
579 not possibly_dirty and not p2_info and wc_tracked and p1_tracked
579 not possibly_dirty and not p2_info and wc_tracked and p1_tracked
580 )
580 )
581
581
582 # this mean we are doing call for file we do not really care about the
582 # this mean we are doing call for file we do not really care about the
583 # data (eg: added or removed), however this should be a minor overhead
583 # data (eg: added or removed), however this should be a minor overhead
584 # compared to the overall update process calling this.
584 # compared to the overall update process calling this.
585 if need_parent_file_data:
585 if need_parent_file_data or parentfiledata is None:
586 if parentfiledata is None:
587 parentfiledata = self._get_filedata(filename)
586 parentfiledata = self._get_filedata(filename)
588 mtime = parentfiledata[2]
589
590 if mtime > self._lastnormaltime:
591 # Remember the most recent modification timeslot for
592 # status(), to make sure we won't miss future
593 # size-preserving file content modifications that happen
594 # within the same timeslot.
595 self._lastnormaltime = mtime
596
587
597 self._map.reset_state(
588 self._map.reset_state(
598 filename,
589 filename,
599 wc_tracked,
590 wc_tracked,
600 p1_tracked,
591 p1_tracked,
601 p2_info=p2_info,
592 p2_info=p2_info,
602 has_meaningful_mtime=not possibly_dirty,
593 has_meaningful_mtime=not possibly_dirty,
603 parentfiledata=parentfiledata,
594 parentfiledata=parentfiledata,
604 )
595 )
605 if (
596 if (
606 parentfiledata is not None
597 parentfiledata is not None
607 and parentfiledata[2] > self._lastnormaltime
598 and parentfiledata[2] > self._lastnormaltime
608 ):
599 ):
609 # Remember the most recent modification timeslot for status(),
600 # Remember the most recent modification timeslot for status(),
610 # to make sure we won't miss future size-preserving file content
601 # to make sure we won't miss future size-preserving file content
611 # modifications that happen within the same timeslot.
602 # modifications that happen within the same timeslot.
612 self._lastnormaltime = parentfiledata[2]
603 self._lastnormaltime = parentfiledata[2]
613
604
614 def _check_new_tracked_filename(self, filename):
605 def _check_new_tracked_filename(self, filename):
615 scmutil.checkfilename(filename)
606 scmutil.checkfilename(filename)
616 if self._map.hastrackeddir(filename):
607 if self._map.hastrackeddir(filename):
617 msg = _(b'directory %r already in dirstate')
608 msg = _(b'directory %r already in dirstate')
618 msg %= pycompat.bytestr(filename)
609 msg %= pycompat.bytestr(filename)
619 raise error.Abort(msg)
610 raise error.Abort(msg)
620 # shadows
611 # shadows
621 for d in pathutil.finddirs(filename):
612 for d in pathutil.finddirs(filename):
622 if self._map.hastrackeddir(d):
613 if self._map.hastrackeddir(d):
623 break
614 break
624 entry = self._map.get(d)
615 entry = self._map.get(d)
625 if entry is not None and not entry.removed:
616 if entry is not None and not entry.removed:
626 msg = _(b'file %r in dirstate clashes with %r')
617 msg = _(b'file %r in dirstate clashes with %r')
627 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
618 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
628 raise error.Abort(msg)
619 raise error.Abort(msg)
629
620
630 def _get_filedata(self, filename):
621 def _get_filedata(self, filename):
631 """returns"""
622 """returns"""
632 s = os.lstat(self._join(filename))
623 s = os.lstat(self._join(filename))
633 mode = s.st_mode
624 mode = s.st_mode
634 size = s.st_size
625 size = s.st_size
635 mtime = s[stat.ST_MTIME]
626 mtime = s[stat.ST_MTIME]
636 return (mode, size, mtime)
627 return (mode, size, mtime)
637
628
638 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
629 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
639 if exists is None:
630 if exists is None:
640 exists = os.path.lexists(os.path.join(self._root, path))
631 exists = os.path.lexists(os.path.join(self._root, path))
641 if not exists:
632 if not exists:
642 # Maybe a path component exists
633 # Maybe a path component exists
643 if not ignoremissing and b'/' in path:
634 if not ignoremissing and b'/' in path:
644 d, f = path.rsplit(b'/', 1)
635 d, f = path.rsplit(b'/', 1)
645 d = self._normalize(d, False, ignoremissing, None)
636 d = self._normalize(d, False, ignoremissing, None)
646 folded = d + b"/" + f
637 folded = d + b"/" + f
647 else:
638 else:
648 # No path components, preserve original case
639 # No path components, preserve original case
649 folded = path
640 folded = path
650 else:
641 else:
651 # recursively normalize leading directory components
642 # recursively normalize leading directory components
652 # against dirstate
643 # against dirstate
653 if b'/' in normed:
644 if b'/' in normed:
654 d, f = normed.rsplit(b'/', 1)
645 d, f = normed.rsplit(b'/', 1)
655 d = self._normalize(d, False, ignoremissing, True)
646 d = self._normalize(d, False, ignoremissing, True)
656 r = self._root + b"/" + d
647 r = self._root + b"/" + d
657 folded = d + b"/" + util.fspath(f, r)
648 folded = d + b"/" + util.fspath(f, r)
658 else:
649 else:
659 folded = util.fspath(normed, self._root)
650 folded = util.fspath(normed, self._root)
660 storemap[normed] = folded
651 storemap[normed] = folded
661
652
662 return folded
653 return folded
663
654
664 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
655 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
665 normed = util.normcase(path)
656 normed = util.normcase(path)
666 folded = self._map.filefoldmap.get(normed, None)
657 folded = self._map.filefoldmap.get(normed, None)
667 if folded is None:
658 if folded is None:
668 if isknown:
659 if isknown:
669 folded = path
660 folded = path
670 else:
661 else:
671 folded = self._discoverpath(
662 folded = self._discoverpath(
672 path, normed, ignoremissing, exists, self._map.filefoldmap
663 path, normed, ignoremissing, exists, self._map.filefoldmap
673 )
664 )
674 return folded
665 return folded
675
666
676 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
667 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
677 normed = util.normcase(path)
668 normed = util.normcase(path)
678 folded = self._map.filefoldmap.get(normed, None)
669 folded = self._map.filefoldmap.get(normed, None)
679 if folded is None:
670 if folded is None:
680 folded = self._map.dirfoldmap.get(normed, None)
671 folded = self._map.dirfoldmap.get(normed, None)
681 if folded is None:
672 if folded is None:
682 if isknown:
673 if isknown:
683 folded = path
674 folded = path
684 else:
675 else:
685 # store discovered result in dirfoldmap so that future
676 # store discovered result in dirfoldmap so that future
686 # normalizefile calls don't start matching directories
677 # normalizefile calls don't start matching directories
687 folded = self._discoverpath(
678 folded = self._discoverpath(
688 path, normed, ignoremissing, exists, self._map.dirfoldmap
679 path, normed, ignoremissing, exists, self._map.dirfoldmap
689 )
680 )
690 return folded
681 return folded
691
682
692 def normalize(self, path, isknown=False, ignoremissing=False):
683 def normalize(self, path, isknown=False, ignoremissing=False):
693 """
684 """
694 normalize the case of a pathname when on a casefolding filesystem
685 normalize the case of a pathname when on a casefolding filesystem
695
686
696 isknown specifies whether the filename came from walking the
687 isknown specifies whether the filename came from walking the
697 disk, to avoid extra filesystem access.
688 disk, to avoid extra filesystem access.
698
689
699 If ignoremissing is True, missing path are returned
690 If ignoremissing is True, missing path are returned
700 unchanged. Otherwise, we try harder to normalize possibly
691 unchanged. Otherwise, we try harder to normalize possibly
701 existing path components.
692 existing path components.
702
693
703 The normalized case is determined based on the following precedence:
694 The normalized case is determined based on the following precedence:
704
695
705 - version of name already stored in the dirstate
696 - version of name already stored in the dirstate
706 - version of name stored on disk
697 - version of name stored on disk
707 - version provided via command arguments
698 - version provided via command arguments
708 """
699 """
709
700
710 if self._checkcase:
701 if self._checkcase:
711 return self._normalize(path, isknown, ignoremissing)
702 return self._normalize(path, isknown, ignoremissing)
712 return path
703 return path
713
704
714 def clear(self):
705 def clear(self):
715 self._map.clear()
706 self._map.clear()
716 self._lastnormaltime = 0
707 self._lastnormaltime = 0
717 self._dirty = True
708 self._dirty = True
718
709
719 def rebuild(self, parent, allfiles, changedfiles=None):
710 def rebuild(self, parent, allfiles, changedfiles=None):
720 if changedfiles is None:
711 if changedfiles is None:
721 # Rebuild entire dirstate
712 # Rebuild entire dirstate
722 to_lookup = allfiles
713 to_lookup = allfiles
723 to_drop = []
714 to_drop = []
724 lastnormaltime = self._lastnormaltime
715 lastnormaltime = self._lastnormaltime
725 self.clear()
716 self.clear()
726 self._lastnormaltime = lastnormaltime
717 self._lastnormaltime = lastnormaltime
727 elif len(changedfiles) < 10:
718 elif len(changedfiles) < 10:
728 # Avoid turning allfiles into a set, which can be expensive if it's
719 # Avoid turning allfiles into a set, which can be expensive if it's
729 # large.
720 # large.
730 to_lookup = []
721 to_lookup = []
731 to_drop = []
722 to_drop = []
732 for f in changedfiles:
723 for f in changedfiles:
733 if f in allfiles:
724 if f in allfiles:
734 to_lookup.append(f)
725 to_lookup.append(f)
735 else:
726 else:
736 to_drop.append(f)
727 to_drop.append(f)
737 else:
728 else:
738 changedfilesset = set(changedfiles)
729 changedfilesset = set(changedfiles)
739 to_lookup = changedfilesset & set(allfiles)
730 to_lookup = changedfilesset & set(allfiles)
740 to_drop = changedfilesset - to_lookup
731 to_drop = changedfilesset - to_lookup
741
732
742 if self._origpl is None:
733 if self._origpl is None:
743 self._origpl = self._pl
734 self._origpl = self._pl
744 self._map.setparents(parent, self._nodeconstants.nullid)
735 self._map.setparents(parent, self._nodeconstants.nullid)
745
736
746 for f in to_lookup:
737 for f in to_lookup:
747
738
748 if self.in_merge:
739 if self.in_merge:
749 self.set_tracked(f)
740 self.set_tracked(f)
750 else:
741 else:
751 self._map.reset_state(
742 self._map.reset_state(
752 f,
743 f,
753 wc_tracked=True,
744 wc_tracked=True,
754 p1_tracked=True,
745 p1_tracked=True,
755 )
746 )
756 for f in to_drop:
747 for f in to_drop:
757 self._map.reset_state(f)
748 self._map.reset_state(f)
758
749
759 self._dirty = True
750 self._dirty = True
760
751
761 def identity(self):
752 def identity(self):
762 """Return identity of dirstate itself to detect changing in storage
753 """Return identity of dirstate itself to detect changing in storage
763
754
764 If identity of previous dirstate is equal to this, writing
755 If identity of previous dirstate is equal to this, writing
765 changes based on the former dirstate out can keep consistency.
756 changes based on the former dirstate out can keep consistency.
766 """
757 """
767 return self._map.identity
758 return self._map.identity
768
759
769 def write(self, tr):
760 def write(self, tr):
770 if not self._dirty:
761 if not self._dirty:
771 return
762 return
772
763
773 filename = self._filename
764 filename = self._filename
774 if tr:
765 if tr:
775 # 'dirstate.write()' is not only for writing in-memory
766 # 'dirstate.write()' is not only for writing in-memory
776 # changes out, but also for dropping ambiguous timestamp.
767 # changes out, but also for dropping ambiguous timestamp.
777 # delayed writing re-raise "ambiguous timestamp issue".
768 # delayed writing re-raise "ambiguous timestamp issue".
778 # See also the wiki page below for detail:
769 # See also the wiki page below for detail:
779 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
770 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
780
771
781 # record when mtime start to be ambiguous
772 # record when mtime start to be ambiguous
782 now = _getfsnow(self._opener)
773 now = _getfsnow(self._opener)
783
774
784 # delay writing in-memory changes out
775 # delay writing in-memory changes out
785 tr.addfilegenerator(
776 tr.addfilegenerator(
786 b'dirstate',
777 b'dirstate',
787 (self._filename,),
778 (self._filename,),
788 lambda f: self._writedirstate(tr, f, now=now),
779 lambda f: self._writedirstate(tr, f, now=now),
789 location=b'plain',
780 location=b'plain',
790 )
781 )
791 return
782 return
792
783
793 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
784 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
794 self._writedirstate(tr, st)
785 self._writedirstate(tr, st)
795
786
796 def addparentchangecallback(self, category, callback):
787 def addparentchangecallback(self, category, callback):
797 """add a callback to be called when the wd parents are changed
788 """add a callback to be called when the wd parents are changed
798
789
799 Callback will be called with the following arguments:
790 Callback will be called with the following arguments:
800 dirstate, (oldp1, oldp2), (newp1, newp2)
791 dirstate, (oldp1, oldp2), (newp1, newp2)
801
792
802 Category is a unique identifier to allow overwriting an old callback
793 Category is a unique identifier to allow overwriting an old callback
803 with a newer callback.
794 with a newer callback.
804 """
795 """
805 self._plchangecallbacks[category] = callback
796 self._plchangecallbacks[category] = callback
806
797
807 def _writedirstate(self, tr, st, now=None):
798 def _writedirstate(self, tr, st, now=None):
808 # notify callbacks about parents change
799 # notify callbacks about parents change
809 if self._origpl is not None and self._origpl != self._pl:
800 if self._origpl is not None and self._origpl != self._pl:
810 for c, callback in sorted(
801 for c, callback in sorted(
811 pycompat.iteritems(self._plchangecallbacks)
802 pycompat.iteritems(self._plchangecallbacks)
812 ):
803 ):
813 callback(self, self._origpl, self._pl)
804 callback(self, self._origpl, self._pl)
814 self._origpl = None
805 self._origpl = None
815
806
816 if now is None:
807 if now is None:
817 # use the modification time of the newly created temporary file as the
808 # use the modification time of the newly created temporary file as the
818 # filesystem's notion of 'now'
809 # filesystem's notion of 'now'
819 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
810 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
820
811
821 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
812 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
822 # timestamp of each entries in dirstate, because of 'now > mtime'
813 # timestamp of each entries in dirstate, because of 'now > mtime'
823 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
814 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
824 if delaywrite > 0:
815 if delaywrite > 0:
825 # do we have any files to delay for?
816 # do we have any files to delay for?
826 for f, e in pycompat.iteritems(self._map):
817 for f, e in pycompat.iteritems(self._map):
827 if e.need_delay(now):
818 if e.need_delay(now):
828 import time # to avoid useless import
819 import time # to avoid useless import
829
820
830 # rather than sleep n seconds, sleep until the next
821 # rather than sleep n seconds, sleep until the next
831 # multiple of n seconds
822 # multiple of n seconds
832 clock = time.time()
823 clock = time.time()
833 start = int(clock) - (int(clock) % delaywrite)
824 start = int(clock) - (int(clock) % delaywrite)
834 end = start + delaywrite
825 end = start + delaywrite
835 time.sleep(end - clock)
826 time.sleep(end - clock)
836 now = end # trust our estimate that the end is near now
827 now = end # trust our estimate that the end is near now
837 break
828 break
838
829
839 self._map.write(tr, st, now)
830 self._map.write(tr, st, now)
840 self._lastnormaltime = 0
831 self._lastnormaltime = 0
841 self._dirty = False
832 self._dirty = False
842
833
843 def _dirignore(self, f):
834 def _dirignore(self, f):
844 if self._ignore(f):
835 if self._ignore(f):
845 return True
836 return True
846 for p in pathutil.finddirs(f):
837 for p in pathutil.finddirs(f):
847 if self._ignore(p):
838 if self._ignore(p):
848 return True
839 return True
849 return False
840 return False
850
841
851 def _ignorefiles(self):
842 def _ignorefiles(self):
852 files = []
843 files = []
853 if os.path.exists(self._join(b'.hgignore')):
844 if os.path.exists(self._join(b'.hgignore')):
854 files.append(self._join(b'.hgignore'))
845 files.append(self._join(b'.hgignore'))
855 for name, path in self._ui.configitems(b"ui"):
846 for name, path in self._ui.configitems(b"ui"):
856 if name == b'ignore' or name.startswith(b'ignore.'):
847 if name == b'ignore' or name.startswith(b'ignore.'):
857 # we need to use os.path.join here rather than self._join
848 # we need to use os.path.join here rather than self._join
858 # because path is arbitrary and user-specified
849 # because path is arbitrary and user-specified
859 files.append(os.path.join(self._rootdir, util.expandpath(path)))
850 files.append(os.path.join(self._rootdir, util.expandpath(path)))
860 return files
851 return files
861
852
862 def _ignorefileandline(self, f):
853 def _ignorefileandline(self, f):
863 files = collections.deque(self._ignorefiles())
854 files = collections.deque(self._ignorefiles())
864 visited = set()
855 visited = set()
865 while files:
856 while files:
866 i = files.popleft()
857 i = files.popleft()
867 patterns = matchmod.readpatternfile(
858 patterns = matchmod.readpatternfile(
868 i, self._ui.warn, sourceinfo=True
859 i, self._ui.warn, sourceinfo=True
869 )
860 )
870 for pattern, lineno, line in patterns:
861 for pattern, lineno, line in patterns:
871 kind, p = matchmod._patsplit(pattern, b'glob')
862 kind, p = matchmod._patsplit(pattern, b'glob')
872 if kind == b"subinclude":
863 if kind == b"subinclude":
873 if p not in visited:
864 if p not in visited:
874 files.append(p)
865 files.append(p)
875 continue
866 continue
876 m = matchmod.match(
867 m = matchmod.match(
877 self._root, b'', [], [pattern], warn=self._ui.warn
868 self._root, b'', [], [pattern], warn=self._ui.warn
878 )
869 )
879 if m(f):
870 if m(f):
880 return (i, lineno, line)
871 return (i, lineno, line)
881 visited.add(i)
872 visited.add(i)
882 return (None, -1, b"")
873 return (None, -1, b"")
883
874
884 def _walkexplicit(self, match, subrepos):
875 def _walkexplicit(self, match, subrepos):
885 """Get stat data about the files explicitly specified by match.
876 """Get stat data about the files explicitly specified by match.
886
877
887 Return a triple (results, dirsfound, dirsnotfound).
878 Return a triple (results, dirsfound, dirsnotfound).
888 - results is a mapping from filename to stat result. It also contains
879 - results is a mapping from filename to stat result. It also contains
889 listings mapping subrepos and .hg to None.
880 listings mapping subrepos and .hg to None.
890 - dirsfound is a list of files found to be directories.
881 - dirsfound is a list of files found to be directories.
891 - dirsnotfound is a list of files that the dirstate thinks are
882 - dirsnotfound is a list of files that the dirstate thinks are
892 directories and that were not found."""
883 directories and that were not found."""
893
884
894 def badtype(mode):
885 def badtype(mode):
895 kind = _(b'unknown')
886 kind = _(b'unknown')
896 if stat.S_ISCHR(mode):
887 if stat.S_ISCHR(mode):
897 kind = _(b'character device')
888 kind = _(b'character device')
898 elif stat.S_ISBLK(mode):
889 elif stat.S_ISBLK(mode):
899 kind = _(b'block device')
890 kind = _(b'block device')
900 elif stat.S_ISFIFO(mode):
891 elif stat.S_ISFIFO(mode):
901 kind = _(b'fifo')
892 kind = _(b'fifo')
902 elif stat.S_ISSOCK(mode):
893 elif stat.S_ISSOCK(mode):
903 kind = _(b'socket')
894 kind = _(b'socket')
904 elif stat.S_ISDIR(mode):
895 elif stat.S_ISDIR(mode):
905 kind = _(b'directory')
896 kind = _(b'directory')
906 return _(b'unsupported file type (type is %s)') % kind
897 return _(b'unsupported file type (type is %s)') % kind
907
898
908 badfn = match.bad
899 badfn = match.bad
909 dmap = self._map
900 dmap = self._map
910 lstat = os.lstat
901 lstat = os.lstat
911 getkind = stat.S_IFMT
902 getkind = stat.S_IFMT
912 dirkind = stat.S_IFDIR
903 dirkind = stat.S_IFDIR
913 regkind = stat.S_IFREG
904 regkind = stat.S_IFREG
914 lnkkind = stat.S_IFLNK
905 lnkkind = stat.S_IFLNK
915 join = self._join
906 join = self._join
916 dirsfound = []
907 dirsfound = []
917 foundadd = dirsfound.append
908 foundadd = dirsfound.append
918 dirsnotfound = []
909 dirsnotfound = []
919 notfoundadd = dirsnotfound.append
910 notfoundadd = dirsnotfound.append
920
911
921 if not match.isexact() and self._checkcase:
912 if not match.isexact() and self._checkcase:
922 normalize = self._normalize
913 normalize = self._normalize
923 else:
914 else:
924 normalize = None
915 normalize = None
925
916
926 files = sorted(match.files())
917 files = sorted(match.files())
927 subrepos.sort()
918 subrepos.sort()
928 i, j = 0, 0
919 i, j = 0, 0
929 while i < len(files) and j < len(subrepos):
920 while i < len(files) and j < len(subrepos):
930 subpath = subrepos[j] + b"/"
921 subpath = subrepos[j] + b"/"
931 if files[i] < subpath:
922 if files[i] < subpath:
932 i += 1
923 i += 1
933 continue
924 continue
934 while i < len(files) and files[i].startswith(subpath):
925 while i < len(files) and files[i].startswith(subpath):
935 del files[i]
926 del files[i]
936 j += 1
927 j += 1
937
928
938 if not files or b'' in files:
929 if not files or b'' in files:
939 files = [b'']
930 files = [b'']
940 # constructing the foldmap is expensive, so don't do it for the
931 # constructing the foldmap is expensive, so don't do it for the
941 # common case where files is ['']
932 # common case where files is ['']
942 normalize = None
933 normalize = None
943 results = dict.fromkeys(subrepos)
934 results = dict.fromkeys(subrepos)
944 results[b'.hg'] = None
935 results[b'.hg'] = None
945
936
946 for ff in files:
937 for ff in files:
947 if normalize:
938 if normalize:
948 nf = normalize(ff, False, True)
939 nf = normalize(ff, False, True)
949 else:
940 else:
950 nf = ff
941 nf = ff
951 if nf in results:
942 if nf in results:
952 continue
943 continue
953
944
954 try:
945 try:
955 st = lstat(join(nf))
946 st = lstat(join(nf))
956 kind = getkind(st.st_mode)
947 kind = getkind(st.st_mode)
957 if kind == dirkind:
948 if kind == dirkind:
958 if nf in dmap:
949 if nf in dmap:
959 # file replaced by dir on disk but still in dirstate
950 # file replaced by dir on disk but still in dirstate
960 results[nf] = None
951 results[nf] = None
961 foundadd((nf, ff))
952 foundadd((nf, ff))
962 elif kind == regkind or kind == lnkkind:
953 elif kind == regkind or kind == lnkkind:
963 results[nf] = st
954 results[nf] = st
964 else:
955 else:
965 badfn(ff, badtype(kind))
956 badfn(ff, badtype(kind))
966 if nf in dmap:
957 if nf in dmap:
967 results[nf] = None
958 results[nf] = None
968 except OSError as inst: # nf not found on disk - it is dirstate only
959 except OSError as inst: # nf not found on disk - it is dirstate only
969 if nf in dmap: # does it exactly match a missing file?
960 if nf in dmap: # does it exactly match a missing file?
970 results[nf] = None
961 results[nf] = None
971 else: # does it match a missing directory?
962 else: # does it match a missing directory?
972 if self._map.hasdir(nf):
963 if self._map.hasdir(nf):
973 notfoundadd(nf)
964 notfoundadd(nf)
974 else:
965 else:
975 badfn(ff, encoding.strtolocal(inst.strerror))
966 badfn(ff, encoding.strtolocal(inst.strerror))
976
967
977 # match.files() may contain explicitly-specified paths that shouldn't
968 # match.files() may contain explicitly-specified paths that shouldn't
978 # be taken; drop them from the list of files found. dirsfound/notfound
969 # be taken; drop them from the list of files found. dirsfound/notfound
979 # aren't filtered here because they will be tested later.
970 # aren't filtered here because they will be tested later.
980 if match.anypats():
971 if match.anypats():
981 for f in list(results):
972 for f in list(results):
982 if f == b'.hg' or f in subrepos:
973 if f == b'.hg' or f in subrepos:
983 # keep sentinel to disable further out-of-repo walks
974 # keep sentinel to disable further out-of-repo walks
984 continue
975 continue
985 if not match(f):
976 if not match(f):
986 del results[f]
977 del results[f]
987
978
988 # Case insensitive filesystems cannot rely on lstat() failing to detect
979 # Case insensitive filesystems cannot rely on lstat() failing to detect
989 # a case-only rename. Prune the stat object for any file that does not
980 # a case-only rename. Prune the stat object for any file that does not
990 # match the case in the filesystem, if there are multiple files that
981 # match the case in the filesystem, if there are multiple files that
991 # normalize to the same path.
982 # normalize to the same path.
992 if match.isexact() and self._checkcase:
983 if match.isexact() and self._checkcase:
993 normed = {}
984 normed = {}
994
985
995 for f, st in pycompat.iteritems(results):
986 for f, st in pycompat.iteritems(results):
996 if st is None:
987 if st is None:
997 continue
988 continue
998
989
999 nc = util.normcase(f)
990 nc = util.normcase(f)
1000 paths = normed.get(nc)
991 paths = normed.get(nc)
1001
992
1002 if paths is None:
993 if paths is None:
1003 paths = set()
994 paths = set()
1004 normed[nc] = paths
995 normed[nc] = paths
1005
996
1006 paths.add(f)
997 paths.add(f)
1007
998
1008 for norm, paths in pycompat.iteritems(normed):
999 for norm, paths in pycompat.iteritems(normed):
1009 if len(paths) > 1:
1000 if len(paths) > 1:
1010 for path in paths:
1001 for path in paths:
1011 folded = self._discoverpath(
1002 folded = self._discoverpath(
1012 path, norm, True, None, self._map.dirfoldmap
1003 path, norm, True, None, self._map.dirfoldmap
1013 )
1004 )
1014 if path != folded:
1005 if path != folded:
1015 results[path] = None
1006 results[path] = None
1016
1007
1017 return results, dirsfound, dirsnotfound
1008 return results, dirsfound, dirsnotfound
1018
1009
1019 def walk(self, match, subrepos, unknown, ignored, full=True):
1010 def walk(self, match, subrepos, unknown, ignored, full=True):
1020 """
1011 """
1021 Walk recursively through the directory tree, finding all files
1012 Walk recursively through the directory tree, finding all files
1022 matched by match.
1013 matched by match.
1023
1014
1024 If full is False, maybe skip some known-clean files.
1015 If full is False, maybe skip some known-clean files.
1025
1016
1026 Return a dict mapping filename to stat-like object (either
1017 Return a dict mapping filename to stat-like object (either
1027 mercurial.osutil.stat instance or return value of os.stat()).
1018 mercurial.osutil.stat instance or return value of os.stat()).
1028
1019
1029 """
1020 """
1030 # full is a flag that extensions that hook into walk can use -- this
1021 # full is a flag that extensions that hook into walk can use -- this
1031 # implementation doesn't use it at all. This satisfies the contract
1022 # implementation doesn't use it at all. This satisfies the contract
1032 # because we only guarantee a "maybe".
1023 # because we only guarantee a "maybe".
1033
1024
1034 if ignored:
1025 if ignored:
1035 ignore = util.never
1026 ignore = util.never
1036 dirignore = util.never
1027 dirignore = util.never
1037 elif unknown:
1028 elif unknown:
1038 ignore = self._ignore
1029 ignore = self._ignore
1039 dirignore = self._dirignore
1030 dirignore = self._dirignore
1040 else:
1031 else:
1041 # if not unknown and not ignored, drop dir recursion and step 2
1032 # if not unknown and not ignored, drop dir recursion and step 2
1042 ignore = util.always
1033 ignore = util.always
1043 dirignore = util.always
1034 dirignore = util.always
1044
1035
1045 matchfn = match.matchfn
1036 matchfn = match.matchfn
1046 matchalways = match.always()
1037 matchalways = match.always()
1047 matchtdir = match.traversedir
1038 matchtdir = match.traversedir
1048 dmap = self._map
1039 dmap = self._map
1049 listdir = util.listdir
1040 listdir = util.listdir
1050 lstat = os.lstat
1041 lstat = os.lstat
1051 dirkind = stat.S_IFDIR
1042 dirkind = stat.S_IFDIR
1052 regkind = stat.S_IFREG
1043 regkind = stat.S_IFREG
1053 lnkkind = stat.S_IFLNK
1044 lnkkind = stat.S_IFLNK
1054 join = self._join
1045 join = self._join
1055
1046
1056 exact = skipstep3 = False
1047 exact = skipstep3 = False
1057 if match.isexact(): # match.exact
1048 if match.isexact(): # match.exact
1058 exact = True
1049 exact = True
1059 dirignore = util.always # skip step 2
1050 dirignore = util.always # skip step 2
1060 elif match.prefix(): # match.match, no patterns
1051 elif match.prefix(): # match.match, no patterns
1061 skipstep3 = True
1052 skipstep3 = True
1062
1053
1063 if not exact and self._checkcase:
1054 if not exact and self._checkcase:
1064 normalize = self._normalize
1055 normalize = self._normalize
1065 normalizefile = self._normalizefile
1056 normalizefile = self._normalizefile
1066 skipstep3 = False
1057 skipstep3 = False
1067 else:
1058 else:
1068 normalize = self._normalize
1059 normalize = self._normalize
1069 normalizefile = None
1060 normalizefile = None
1070
1061
1071 # step 1: find all explicit files
1062 # step 1: find all explicit files
1072 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1063 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1073 if matchtdir:
1064 if matchtdir:
1074 for d in work:
1065 for d in work:
1075 matchtdir(d[0])
1066 matchtdir(d[0])
1076 for d in dirsnotfound:
1067 for d in dirsnotfound:
1077 matchtdir(d)
1068 matchtdir(d)
1078
1069
1079 skipstep3 = skipstep3 and not (work or dirsnotfound)
1070 skipstep3 = skipstep3 and not (work or dirsnotfound)
1080 work = [d for d in work if not dirignore(d[0])]
1071 work = [d for d in work if not dirignore(d[0])]
1081
1072
1082 # step 2: visit subdirectories
1073 # step 2: visit subdirectories
1083 def traverse(work, alreadynormed):
1074 def traverse(work, alreadynormed):
1084 wadd = work.append
1075 wadd = work.append
1085 while work:
1076 while work:
1086 tracing.counter('dirstate.walk work', len(work))
1077 tracing.counter('dirstate.walk work', len(work))
1087 nd = work.pop()
1078 nd = work.pop()
1088 visitentries = match.visitchildrenset(nd)
1079 visitentries = match.visitchildrenset(nd)
1089 if not visitentries:
1080 if not visitentries:
1090 continue
1081 continue
1091 if visitentries == b'this' or visitentries == b'all':
1082 if visitentries == b'this' or visitentries == b'all':
1092 visitentries = None
1083 visitentries = None
1093 skip = None
1084 skip = None
1094 if nd != b'':
1085 if nd != b'':
1095 skip = b'.hg'
1086 skip = b'.hg'
1096 try:
1087 try:
1097 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1088 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1098 entries = listdir(join(nd), stat=True, skip=skip)
1089 entries = listdir(join(nd), stat=True, skip=skip)
1099 except OSError as inst:
1090 except OSError as inst:
1100 if inst.errno in (errno.EACCES, errno.ENOENT):
1091 if inst.errno in (errno.EACCES, errno.ENOENT):
1101 match.bad(
1092 match.bad(
1102 self.pathto(nd), encoding.strtolocal(inst.strerror)
1093 self.pathto(nd), encoding.strtolocal(inst.strerror)
1103 )
1094 )
1104 continue
1095 continue
1105 raise
1096 raise
1106 for f, kind, st in entries:
1097 for f, kind, st in entries:
1107 # Some matchers may return files in the visitentries set,
1098 # Some matchers may return files in the visitentries set,
1108 # instead of 'this', if the matcher explicitly mentions them
1099 # instead of 'this', if the matcher explicitly mentions them
1109 # and is not an exactmatcher. This is acceptable; we do not
1100 # and is not an exactmatcher. This is acceptable; we do not
1110 # make any hard assumptions about file-or-directory below
1101 # make any hard assumptions about file-or-directory below
1111 # based on the presence of `f` in visitentries. If
1102 # based on the presence of `f` in visitentries. If
1112 # visitchildrenset returned a set, we can always skip the
1103 # visitchildrenset returned a set, we can always skip the
1113 # entries *not* in the set it provided regardless of whether
1104 # entries *not* in the set it provided regardless of whether
1114 # they're actually a file or a directory.
1105 # they're actually a file or a directory.
1115 if visitentries and f not in visitentries:
1106 if visitentries and f not in visitentries:
1116 continue
1107 continue
1117 if normalizefile:
1108 if normalizefile:
1118 # even though f might be a directory, we're only
1109 # even though f might be a directory, we're only
1119 # interested in comparing it to files currently in the
1110 # interested in comparing it to files currently in the
1120 # dmap -- therefore normalizefile is enough
1111 # dmap -- therefore normalizefile is enough
1121 nf = normalizefile(
1112 nf = normalizefile(
1122 nd and (nd + b"/" + f) or f, True, True
1113 nd and (nd + b"/" + f) or f, True, True
1123 )
1114 )
1124 else:
1115 else:
1125 nf = nd and (nd + b"/" + f) or f
1116 nf = nd and (nd + b"/" + f) or f
1126 if nf not in results:
1117 if nf not in results:
1127 if kind == dirkind:
1118 if kind == dirkind:
1128 if not ignore(nf):
1119 if not ignore(nf):
1129 if matchtdir:
1120 if matchtdir:
1130 matchtdir(nf)
1121 matchtdir(nf)
1131 wadd(nf)
1122 wadd(nf)
1132 if nf in dmap and (matchalways or matchfn(nf)):
1123 if nf in dmap and (matchalways or matchfn(nf)):
1133 results[nf] = None
1124 results[nf] = None
1134 elif kind == regkind or kind == lnkkind:
1125 elif kind == regkind or kind == lnkkind:
1135 if nf in dmap:
1126 if nf in dmap:
1136 if matchalways or matchfn(nf):
1127 if matchalways or matchfn(nf):
1137 results[nf] = st
1128 results[nf] = st
1138 elif (matchalways or matchfn(nf)) and not ignore(
1129 elif (matchalways or matchfn(nf)) and not ignore(
1139 nf
1130 nf
1140 ):
1131 ):
1141 # unknown file -- normalize if necessary
1132 # unknown file -- normalize if necessary
1142 if not alreadynormed:
1133 if not alreadynormed:
1143 nf = normalize(nf, False, True)
1134 nf = normalize(nf, False, True)
1144 results[nf] = st
1135 results[nf] = st
1145 elif nf in dmap and (matchalways or matchfn(nf)):
1136 elif nf in dmap and (matchalways or matchfn(nf)):
1146 results[nf] = None
1137 results[nf] = None
1147
1138
1148 for nd, d in work:
1139 for nd, d in work:
1149 # alreadynormed means that processwork doesn't have to do any
1140 # alreadynormed means that processwork doesn't have to do any
1150 # expensive directory normalization
1141 # expensive directory normalization
1151 alreadynormed = not normalize or nd == d
1142 alreadynormed = not normalize or nd == d
1152 traverse([d], alreadynormed)
1143 traverse([d], alreadynormed)
1153
1144
1154 for s in subrepos:
1145 for s in subrepos:
1155 del results[s]
1146 del results[s]
1156 del results[b'.hg']
1147 del results[b'.hg']
1157
1148
1158 # step 3: visit remaining files from dmap
1149 # step 3: visit remaining files from dmap
1159 if not skipstep3 and not exact:
1150 if not skipstep3 and not exact:
1160 # If a dmap file is not in results yet, it was either
1151 # If a dmap file is not in results yet, it was either
1161 # a) not matching matchfn b) ignored, c) missing, or d) under a
1152 # a) not matching matchfn b) ignored, c) missing, or d) under a
1162 # symlink directory.
1153 # symlink directory.
1163 if not results and matchalways:
1154 if not results and matchalways:
1164 visit = [f for f in dmap]
1155 visit = [f for f in dmap]
1165 else:
1156 else:
1166 visit = [f for f in dmap if f not in results and matchfn(f)]
1157 visit = [f for f in dmap if f not in results and matchfn(f)]
1167 visit.sort()
1158 visit.sort()
1168
1159
1169 if unknown:
1160 if unknown:
1170 # unknown == True means we walked all dirs under the roots
1161 # unknown == True means we walked all dirs under the roots
1171 # that wasn't ignored, and everything that matched was stat'ed
1162 # that wasn't ignored, and everything that matched was stat'ed
1172 # and is already in results.
1163 # and is already in results.
1173 # The rest must thus be ignored or under a symlink.
1164 # The rest must thus be ignored or under a symlink.
1174 audit_path = pathutil.pathauditor(self._root, cached=True)
1165 audit_path = pathutil.pathauditor(self._root, cached=True)
1175
1166
1176 for nf in iter(visit):
1167 for nf in iter(visit):
1177 # If a stat for the same file was already added with a
1168 # If a stat for the same file was already added with a
1178 # different case, don't add one for this, since that would
1169 # different case, don't add one for this, since that would
1179 # make it appear as if the file exists under both names
1170 # make it appear as if the file exists under both names
1180 # on disk.
1171 # on disk.
1181 if (
1172 if (
1182 normalizefile
1173 normalizefile
1183 and normalizefile(nf, True, True) in results
1174 and normalizefile(nf, True, True) in results
1184 ):
1175 ):
1185 results[nf] = None
1176 results[nf] = None
1186 # Report ignored items in the dmap as long as they are not
1177 # Report ignored items in the dmap as long as they are not
1187 # under a symlink directory.
1178 # under a symlink directory.
1188 elif audit_path.check(nf):
1179 elif audit_path.check(nf):
1189 try:
1180 try:
1190 results[nf] = lstat(join(nf))
1181 results[nf] = lstat(join(nf))
1191 # file was just ignored, no links, and exists
1182 # file was just ignored, no links, and exists
1192 except OSError:
1183 except OSError:
1193 # file doesn't exist
1184 # file doesn't exist
1194 results[nf] = None
1185 results[nf] = None
1195 else:
1186 else:
1196 # It's either missing or under a symlink directory
1187 # It's either missing or under a symlink directory
1197 # which we in this case report as missing
1188 # which we in this case report as missing
1198 results[nf] = None
1189 results[nf] = None
1199 else:
1190 else:
1200 # We may not have walked the full directory tree above,
1191 # We may not have walked the full directory tree above,
1201 # so stat and check everything we missed.
1192 # so stat and check everything we missed.
1202 iv = iter(visit)
1193 iv = iter(visit)
1203 for st in util.statfiles([join(i) for i in visit]):
1194 for st in util.statfiles([join(i) for i in visit]):
1204 results[next(iv)] = st
1195 results[next(iv)] = st
1205 return results
1196 return results
1206
1197
def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
    """Compute working-copy status through the Rust `rustmod.status` fast path.

    Returns a ``(lookup, status)`` pair where ``lookup`` is the list of
    files whose state is uncertain and ``status`` is a ``scmutil.status``
    object, mirroring the pure-Python ``status()`` return shape.

    Side effects: may set the RAYON_NUM_THREADS environment variable,
    ORs the Rust-reported dirtiness into ``self._dirty``, invokes
    ``matcher.traversedir``/``matcher.bad`` callbacks, and emits ignore-file
    warnings through ``self._ui.warn``.
    """
    # Force Rayon (Rust parallelism library) to respect the number of
    # workers. This is a temporary workaround until Rust code knows
    # how to read the config file.
    numcpus = self._ui.configint(b"worker", b"numcpus")
    if numcpus is not None:
        encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

    workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
    if not workers_enabled:
        # A single Rayon thread effectively disables parallelism.
        encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

    (
        lookup,
        modified,
        added,
        removed,
        deleted,
        clean,
        ignored,
        unknown,
        warnings,
        bad,
        traversed,
        dirty,
    ) = rustmod.status(
        self._map._map,
        matcher,
        self._rootdir,
        self._ignorefiles(),
        self._checkexec,
        self._lastnormaltime,
        bool(list_clean),
        bool(list_ignored),
        bool(list_unknown),
        bool(matcher.traversedir),
    )

    # The Rust side may have mutated the dirstate (e.g. fixing up entries);
    # propagate that so the map gets written back.
    self._dirty |= dirty

    if matcher.traversedir:
        for dir in traversed:
            matcher.traversedir(dir)

    if self._ui.warn:
        for item in warnings:
            if isinstance(item, tuple):
                # (file_path, syntax) pair: invalid syntax directive in an
                # ignore file.
                file_path, syntax = item
                msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                    file_path,
                    syntax,
                )
                self._ui.warn(msg)
            else:
                # bare path: the ignore file itself could not be read
                msg = _(b"skipping unreadable pattern file '%s': %s\n")
                self._ui.warn(
                    msg
                    % (
                        pathutil.canonpath(
                            self._rootdir, self._rootdir, item
                        ),
                        b"No such file or directory",
                    )
                )

    for (fn, message) in bad:
        matcher.bad(fn, encoding.strtolocal(message))

    status = scmutil.status(
        modified=modified,
        added=added,
        removed=removed,
        deleted=deleted,
        unknown=unknown,
        ignored=ignored,
        clean=clean,
    )
    return (lookup, status)
1285
1276
def status(self, match, subrepos, ignored, clean, unknown):
    """Determine the status of the working copy relative to the
    dirstate and return a pair of (unsure, status), where status is of type
    scmutil.status and:

      unsure:
        files that might have been modified since the dirstate was
        written, but need to be read to be sure (size is the same
        but mtime differs)
      status.modified:
        files that have definitely been modified since the dirstate
        was written (different size or mode)
      status.clean:
        files that have definitely not been modified since the
        dirstate was written
    """
    listignored, listclean, listunknown = ignored, clean, unknown
    # Rebind the parameter names as result accumulators for the rest of
    # the function.
    lookup, modified, added, unknown, ignored = [], [], [], [], []
    removed, deleted, clean = [], [], []

    dmap = self._map
    dmap.preload()

    use_rust = True

    # Matcher types the Rust implementation knows how to handle.
    allowed_matchers = (
        matchmod.alwaysmatcher,
        matchmod.exactmatcher,
        matchmod.includematcher,
    )

    if rustmod is None:
        use_rust = False
    elif self._checkcase:
        # Case-insensitive filesystems are not handled yet
        use_rust = False
    elif subrepos:
        use_rust = False
    elif sparse.enabled:
        use_rust = False
    elif not isinstance(match, allowed_matchers):
        # Some matchers have yet to be implemented
        use_rust = False

    if use_rust:
        try:
            return self._rust_status(
                match, listclean, listignored, listunknown
            )
        except rustmod.FallbackError:
            # Rust asked us to redo the work in Python; fall through.
            pass

    def noop(f):
        # placeholder "append" used when a category is not requested
        pass

    # Bind hot-loop lookups to locals for speed.
    dcontains = dmap.__contains__
    dget = dmap.__getitem__
    ladd = lookup.append  # aka "unsure"
    madd = modified.append
    aadd = added.append
    uadd = unknown.append if listunknown else noop
    iadd = ignored.append if listignored else noop
    radd = removed.append
    dadd = deleted.append
    cadd = clean.append if listclean else noop
    mexact = match.exact
    dirignore = self._dirignore
    checkexec = self._checkexec
    copymap = self._map.copymap
    lastnormaltime = self._lastnormaltime

    # We need to do full walks when either
    # - we're listing all clean files, or
    # - match.traversedir does something, because match.traversedir should
    #   be called for every dir in the working dir
    full = listclean or match.traversedir is not None
    for fn, st in pycompat.iteritems(
        self.walk(match, subrepos, listunknown, listignored, full=full)
    ):
        if not dcontains(fn):
            # File on disk but not tracked: ignored or unknown.
            if (listignored or mexact(fn)) and dirignore(fn):
                if listignored:
                    iadd(fn)
            else:
                uadd(fn)
            continue

        # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
        # written like that for performance reasons. dmap[fn] is not a
        # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
        # opcode has fast paths when the value to be unpacked is a tuple or
        # a list, but falls back to creating a full-fledged iterator in
        # general. That is much slower than simply accessing and storing the
        # tuple members one by one.
        t = dget(fn)
        mode = t.mode
        size = t.size
        time = t.mtime

        if not st and t.tracked:
            dadd(fn)
        elif t.merged:
            madd(fn)
        elif t.added:
            aadd(fn)
        elif t.removed:
            radd(fn)
        elif t.tracked:
            if (
                size >= 0
                and (
                    (size != st.st_size and size != st.st_size & _rangemask)
                    or ((mode ^ st.st_mode) & 0o100 and checkexec)
                )
                or t.from_p2
                or fn in copymap
            ):
                if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                    # issue6456: Size returned may be longer due to
                    # encryption on EXT-4 fscrypt, undecided.
                    ladd(fn)
                else:
                    madd(fn)
            elif (
                time != st[stat.ST_MTIME]
                and time != st[stat.ST_MTIME] & _rangemask
            ):
                ladd(fn)
            elif st[stat.ST_MTIME] == lastnormaltime:
                # fn may have just been marked as normal and it may have
                # changed in the same second without changing its size.
                # This can happen if we quickly do multiple commits.
                # Force lookup, so we don't miss such a racy file change.
                ladd(fn)
            elif listclean:
                cadd(fn)
    status = scmutil.status(
        modified, added, removed, deleted, unknown, ignored, clean
    )
    return (lookup, status)
1426
1417
def matches(self, match):
    """
    return files in the dirstate (in whatever state) filtered by match
    """
    dmap = self._map
    if rustmod is not None:
        # With the Rust extension, filenames live on the inner map.
        dmap = self._map._map

    if match.always():
        return dmap.keys()
    files = match.files()
    if match.isexact():
        # fast path -- filter the other way around, since typically files is
        # much smaller than dmap
        return [f for f in files if f in dmap]
    if match.prefix() and all(fn in dmap for fn in files):
        # fast path -- all the values are known to be files, so just return
        # that
        return list(files)
    return [f for f in dmap if match(f)]
1447
1438
1448 def _actualfilename(self, tr):
1439 def _actualfilename(self, tr):
1449 if tr:
1440 if tr:
1450 return self._pendingfilename
1441 return self._pendingfilename
1451 else:
1442 else:
1452 return self._filename
1443 return self._filename
1453
1444
def savebackup(self, tr, backupname):
    '''Save current dirstate into backup file'''
    filename = self._actualfilename(tr)
    assert backupname != filename

    # use '_writedirstate' instead of 'write' to write changes certainly,
    # because the latter omits writing out if transaction is running.
    # output file will be used to create backup of dirstate at this point.
    if self._dirty or not self._opener.exists(filename):
        self._writedirstate(
            tr,
            self._opener(filename, b"w", atomictemp=True, checkambig=True),
        )

    if tr:
        # ensure that subsequent tr.writepending returns True for
        # changes written out above, even if dirstate is never
        # changed after this
        tr.addfilegenerator(
            b'dirstate',
            (self._filename,),
            lambda f: self._writedirstate(tr, f),
            location=b'plain',
        )

        # ensure that pending file written above is unlinked at
        # failure, even if tr.writepending isn't invoked until the
        # end of this transaction
        tr.registertmp(filename, location=b'plain')

    self._opener.tryunlink(backupname)
    # hardlink backup is okay because _writedirstate is always called
    # with an "atomictemp=True" file.
    util.copyfile(
        self._opener.join(filename),
        self._opener.join(backupname),
        hardlink=True,
    )
1492
1483
def restorebackup(self, tr, backupname):
    '''Restore dirstate by backup file'''
    # this "invalidate()" prevents "wlock.release()" from writing
    # changes of dirstate out after restoring from backup file
    self.invalidate()
    filename = self._actualfilename(tr)
    o = self._opener
    if util.samefile(o.join(backupname), o.join(filename)):
        # The backup is a hardlink to the live file (see savebackup), so
        # dropping the backup name is enough.
        o.unlink(backupname)
    else:
        o.rename(backupname, filename, checkambig=True)
1504
1495
def clearbackup(self, tr, backupname):
    '''Clear backup file'''
    # ``tr`` is unused; kept for signature symmetry with
    # savebackup/restorebackup.
    self._opener.unlink(backupname)
1508
1499
def verify(self, m1, m2):
    """check the dirstate content against the parent manifest and yield errors

    ``m1`` and ``m2`` are the first and second parent manifests.  Each
    error is yielded as a ``(format_string, filename, state)`` tuple so
    callers can format or count them as they see fit.
    """
    missing_from_p1 = b"%s in state %s, but not in manifest1\n"
    unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
    missing_from_ps = b"%s in state %s, but not in either manifest\n"
    missing_from_ds = b"%s in manifest1, but listed as state %s\n"
    for f, entry in self.items():
        state = entry.state
        # normal/removed entries must exist in p1
        if state in b"nr" and f not in m1:
            yield (missing_from_p1, f, state)
        # added entries must NOT already exist in p1
        if state in b"a" and f in m1:
            yield (unexpected_in_p1, f, state)
        # merged entries must come from at least one parent
        if state in b"m" and f not in m1 and f not in m2:
            yield (missing_from_ps, f, state)
    for f in m1:
        # every p1 file must be tracked as normal, removed or merged
        state = self.get_entry(f).state
        if state not in b"nrm":
            yield (missing_from_ds, f, state)
General Comments 0
You need to be logged in to leave comments. Login now