##// END OF EJS Templates
dirstate: drop an incorrect comment...
marmoute -
r48958:de0977ec default
parent child Browse files
Show More
@@ -1,1517 +1,1514 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = dirstatemap.DirstateItem
48 DirstateItem = dirstatemap.DirstateItem
49
49
50
50
class repocache(filecache):
    """filecache subclass that resolves names inside ``.hg/``."""

    def join(self, obj, fname):
        # Files tracked by this cache live in the repository's .hg
        # directory, so resolve through the dirstate's opener.
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """filecache subclass that resolves names relative to the repository root."""

    def join(self, obj, fname):
        # Delegate to dirstate._join, which prefixes the working
        # directory root.
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator enforcing use inside a ``parentchange`` context.

    The wrapped method raises ``error.ProgrammingError`` when invoked
    while no dirstate parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            raise error.ProgrammingError(
                'calling `%s` outside of a parentchange context'
                % func.__name__
            )
        return func(self, *args, **kwargs)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator enforcing use outside of a ``parentchange`` context.

    The wrapped method raises ``error.ProgrammingError`` when invoked
    while a dirstate parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            raise error.ProgrammingError(
                'calling `%s` inside of a parentchange context'
                % func.__name__
            )
        return func(self, *args, **kwargs)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._mapcls = dirstatemap.dirstatemap
133 self._mapcls = dirstatemap.dirstatemap
134 # Access and cache cwd early, so we don't access it for the first time
134 # Access and cache cwd early, so we don't access it for the first time
135 # after a working-copy update caused it to not exist (accessing it then
135 # after a working-copy update caused it to not exist (accessing it then
136 # raises an exception).
136 # raises an exception).
137 self._cwd
137 self._cwd
138
138
139 def prefetch_parents(self):
139 def prefetch_parents(self):
140 """make sure the parents are loaded
140 """make sure the parents are loaded
141
141
142 Used to avoid a race condition.
142 Used to avoid a race condition.
143 """
143 """
144 self._pl
144 self._pl
145
145
146 @contextlib.contextmanager
146 @contextlib.contextmanager
147 def parentchange(self):
147 def parentchange(self):
148 """Context manager for handling dirstate parents.
148 """Context manager for handling dirstate parents.
149
149
150 If an exception occurs in the scope of the context manager,
150 If an exception occurs in the scope of the context manager,
151 the incoherent dirstate won't be written when wlock is
151 the incoherent dirstate won't be written when wlock is
152 released.
152 released.
153 """
153 """
154 self._parentwriters += 1
154 self._parentwriters += 1
155 yield
155 yield
156 # Typically we want the "undo" step of a context manager in a
156 # Typically we want the "undo" step of a context manager in a
157 # finally block so it happens even when an exception
157 # finally block so it happens even when an exception
158 # occurs. In this case, however, we only want to decrement
158 # occurs. In this case, however, we only want to decrement
159 # parentwriters if the code in the with statement exits
159 # parentwriters if the code in the with statement exits
160 # normally, so we don't have a try/finally here on purpose.
160 # normally, so we don't have a try/finally here on purpose.
161 self._parentwriters -= 1
161 self._parentwriters -= 1
162
162
163 def pendingparentchange(self):
163 def pendingparentchange(self):
164 """Returns true if the dirstate is in the middle of a set of changes
164 """Returns true if the dirstate is in the middle of a set of changes
165 that modify the dirstate parent.
165 that modify the dirstate parent.
166 """
166 """
167 return self._parentwriters > 0
167 return self._parentwriters > 0
168
168
169 @propertycache
169 @propertycache
170 def _map(self):
170 def _map(self):
171 """Return the dirstate contents (see documentation for dirstatemap)."""
171 """Return the dirstate contents (see documentation for dirstatemap)."""
172 self._map = self._mapcls(
172 self._map = self._mapcls(
173 self._ui,
173 self._ui,
174 self._opener,
174 self._opener,
175 self._root,
175 self._root,
176 self._nodeconstants,
176 self._nodeconstants,
177 self._use_dirstate_v2,
177 self._use_dirstate_v2,
178 )
178 )
179 return self._map
179 return self._map
180
180
181 @property
181 @property
182 def _sparsematcher(self):
182 def _sparsematcher(self):
183 """The matcher for the sparse checkout.
183 """The matcher for the sparse checkout.
184
184
185 The working directory may not include every file from a manifest. The
185 The working directory may not include every file from a manifest. The
186 matcher obtained by this property will match a path if it is to be
186 matcher obtained by this property will match a path if it is to be
187 included in the working directory.
187 included in the working directory.
188 """
188 """
189 # TODO there is potential to cache this property. For now, the matcher
189 # TODO there is potential to cache this property. For now, the matcher
190 # is resolved on every access. (But the called function does use a
190 # is resolved on every access. (But the called function does use a
191 # cache to keep the lookup fast.)
191 # cache to keep the lookup fast.)
192 return self._sparsematchfn()
192 return self._sparsematchfn()
193
193
194 @repocache(b'branch')
194 @repocache(b'branch')
195 def _branch(self):
195 def _branch(self):
196 try:
196 try:
197 return self._opener.read(b"branch").strip() or b"default"
197 return self._opener.read(b"branch").strip() or b"default"
198 except IOError as inst:
198 except IOError as inst:
199 if inst.errno != errno.ENOENT:
199 if inst.errno != errno.ENOENT:
200 raise
200 raise
201 return b"default"
201 return b"default"
202
202
203 @property
203 @property
204 def _pl(self):
204 def _pl(self):
205 return self._map.parents()
205 return self._map.parents()
206
206
207 def hasdir(self, d):
207 def hasdir(self, d):
208 return self._map.hastrackeddir(d)
208 return self._map.hastrackeddir(d)
209
209
210 @rootcache(b'.hgignore')
210 @rootcache(b'.hgignore')
211 def _ignore(self):
211 def _ignore(self):
212 files = self._ignorefiles()
212 files = self._ignorefiles()
213 if not files:
213 if not files:
214 return matchmod.never()
214 return matchmod.never()
215
215
216 pats = [b'include:%s' % f for f in files]
216 pats = [b'include:%s' % f for f in files]
217 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
217 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218
218
219 @propertycache
219 @propertycache
220 def _slash(self):
220 def _slash(self):
221 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
221 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222
222
223 @propertycache
223 @propertycache
224 def _checklink(self):
224 def _checklink(self):
225 return util.checklink(self._root)
225 return util.checklink(self._root)
226
226
227 @propertycache
227 @propertycache
228 def _checkexec(self):
228 def _checkexec(self):
229 return bool(util.checkexec(self._root))
229 return bool(util.checkexec(self._root))
230
230
231 @propertycache
231 @propertycache
232 def _checkcase(self):
232 def _checkcase(self):
233 return not util.fscasesensitive(self._join(b'.hg'))
233 return not util.fscasesensitive(self._join(b'.hg'))
234
234
235 def _join(self, f):
235 def _join(self, f):
236 # much faster than os.path.join()
236 # much faster than os.path.join()
237 # it's safe because f is always a relative path
237 # it's safe because f is always a relative path
238 return self._rootdir + f
238 return self._rootdir + f
239
239
240 def flagfunc(self, buildfallback):
240 def flagfunc(self, buildfallback):
241 if self._checklink and self._checkexec:
241 if self._checklink and self._checkexec:
242
242
243 def f(x):
243 def f(x):
244 try:
244 try:
245 st = os.lstat(self._join(x))
245 st = os.lstat(self._join(x))
246 if util.statislink(st):
246 if util.statislink(st):
247 return b'l'
247 return b'l'
248 if util.statisexec(st):
248 if util.statisexec(st):
249 return b'x'
249 return b'x'
250 except OSError:
250 except OSError:
251 pass
251 pass
252 return b''
252 return b''
253
253
254 return f
254 return f
255
255
256 fallback = buildfallback()
256 fallback = buildfallback()
257 if self._checklink:
257 if self._checklink:
258
258
259 def f(x):
259 def f(x):
260 if os.path.islink(self._join(x)):
260 if os.path.islink(self._join(x)):
261 return b'l'
261 return b'l'
262 if b'x' in fallback(x):
262 if b'x' in fallback(x):
263 return b'x'
263 return b'x'
264 return b''
264 return b''
265
265
266 return f
266 return f
267 if self._checkexec:
267 if self._checkexec:
268
268
269 def f(x):
269 def f(x):
270 if b'l' in fallback(x):
270 if b'l' in fallback(x):
271 return b'l'
271 return b'l'
272 if util.isexec(self._join(x)):
272 if util.isexec(self._join(x)):
273 return b'x'
273 return b'x'
274 return b''
274 return b''
275
275
276 return f
276 return f
277 else:
277 else:
278 return fallback
278 return fallback
279
279
280 @propertycache
280 @propertycache
281 def _cwd(self):
281 def _cwd(self):
282 # internal config: ui.forcecwd
282 # internal config: ui.forcecwd
283 forcecwd = self._ui.config(b'ui', b'forcecwd')
283 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 if forcecwd:
284 if forcecwd:
285 return forcecwd
285 return forcecwd
286 return encoding.getcwd()
286 return encoding.getcwd()
287
287
288 def getcwd(self):
288 def getcwd(self):
289 """Return the path from which a canonical path is calculated.
289 """Return the path from which a canonical path is calculated.
290
290
291 This path should be used to resolve file patterns or to convert
291 This path should be used to resolve file patterns or to convert
292 canonical paths back to file paths for display. It shouldn't be
292 canonical paths back to file paths for display. It shouldn't be
293 used to get real file paths. Use vfs functions instead.
293 used to get real file paths. Use vfs functions instead.
294 """
294 """
295 cwd = self._cwd
295 cwd = self._cwd
296 if cwd == self._root:
296 if cwd == self._root:
297 return b''
297 return b''
298 # self._root ends with a path separator if self._root is '/' or 'C:\'
298 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 rootsep = self._root
299 rootsep = self._root
300 if not util.endswithsep(rootsep):
300 if not util.endswithsep(rootsep):
301 rootsep += pycompat.ossep
301 rootsep += pycompat.ossep
302 if cwd.startswith(rootsep):
302 if cwd.startswith(rootsep):
303 return cwd[len(rootsep) :]
303 return cwd[len(rootsep) :]
304 else:
304 else:
305 # we're outside the repo. return an absolute path.
305 # we're outside the repo. return an absolute path.
306 return cwd
306 return cwd
307
307
308 def pathto(self, f, cwd=None):
308 def pathto(self, f, cwd=None):
309 if cwd is None:
309 if cwd is None:
310 cwd = self.getcwd()
310 cwd = self.getcwd()
311 path = util.pathto(self._root, cwd, f)
311 path = util.pathto(self._root, cwd, f)
312 if self._slash:
312 if self._slash:
313 return util.pconvert(path)
313 return util.pconvert(path)
314 return path
314 return path
315
315
316 def __getitem__(self, key):
316 def __getitem__(self, key):
317 """Return the current state of key (a filename) in the dirstate.
317 """Return the current state of key (a filename) in the dirstate.
318
318
319 States are:
319 States are:
320 n normal
320 n normal
321 m needs merging
321 m needs merging
322 r marked for removal
322 r marked for removal
323 a marked for addition
323 a marked for addition
324 ? not tracked
324 ? not tracked
325
325
326 XXX The "state" is a bit obscure to be in the "public" API. we should
326 XXX The "state" is a bit obscure to be in the "public" API. we should
327 consider migrating all user of this to going through the dirstate entry
327 consider migrating all user of this to going through the dirstate entry
328 instead.
328 instead.
329 """
329 """
330 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
330 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
331 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
331 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
332 entry = self._map.get(key)
332 entry = self._map.get(key)
333 if entry is not None:
333 if entry is not None:
334 return entry.state
334 return entry.state
335 return b'?'
335 return b'?'
336
336
337 def get_entry(self, path):
337 def get_entry(self, path):
338 """return a DirstateItem for the associated path"""
338 """return a DirstateItem for the associated path"""
339 entry = self._map.get(path)
339 entry = self._map.get(path)
340 if entry is None:
340 if entry is None:
341 return DirstateItem()
341 return DirstateItem()
342 return entry
342 return entry
343
343
344 def __contains__(self, key):
344 def __contains__(self, key):
345 return key in self._map
345 return key in self._map
346
346
347 def __iter__(self):
347 def __iter__(self):
348 return iter(sorted(self._map))
348 return iter(sorted(self._map))
349
349
350 def items(self):
350 def items(self):
351 return pycompat.iteritems(self._map)
351 return pycompat.iteritems(self._map)
352
352
353 iteritems = items
353 iteritems = items
354
354
355 def parents(self):
355 def parents(self):
356 return [self._validate(p) for p in self._pl]
356 return [self._validate(p) for p in self._pl]
357
357
358 def p1(self):
358 def p1(self):
359 return self._validate(self._pl[0])
359 return self._validate(self._pl[0])
360
360
361 def p2(self):
361 def p2(self):
362 return self._validate(self._pl[1])
362 return self._validate(self._pl[1])
363
363
364 @property
364 @property
365 def in_merge(self):
365 def in_merge(self):
366 """True if a merge is in progress"""
366 """True if a merge is in progress"""
367 return self._pl[1] != self._nodeconstants.nullid
367 return self._pl[1] != self._nodeconstants.nullid
368
368
369 def branch(self):
369 def branch(self):
370 return encoding.tolocal(self._branch)
370 return encoding.tolocal(self._branch)
371
371
372 def setparents(self, p1, p2=None):
372 def setparents(self, p1, p2=None):
373 """Set dirstate parents to p1 and p2.
373 """Set dirstate parents to p1 and p2.
374
374
375 When moving from two parents to one, "merged" entries a
375 When moving from two parents to one, "merged" entries a
376 adjusted to normal and previous copy records discarded and
376 adjusted to normal and previous copy records discarded and
377 returned by the call.
377 returned by the call.
378
378
379 See localrepo.setparents()
379 See localrepo.setparents()
380 """
380 """
381 if p2 is None:
381 if p2 is None:
382 p2 = self._nodeconstants.nullid
382 p2 = self._nodeconstants.nullid
383 if self._parentwriters == 0:
383 if self._parentwriters == 0:
384 raise ValueError(
384 raise ValueError(
385 b"cannot set dirstate parent outside of "
385 b"cannot set dirstate parent outside of "
386 b"dirstate.parentchange context manager"
386 b"dirstate.parentchange context manager"
387 )
387 )
388
388
389 self._dirty = True
389 self._dirty = True
390 oldp2 = self._pl[1]
390 oldp2 = self._pl[1]
391 if self._origpl is None:
391 if self._origpl is None:
392 self._origpl = self._pl
392 self._origpl = self._pl
393 nullid = self._nodeconstants.nullid
393 nullid = self._nodeconstants.nullid
394 # True if we need to fold p2 related state back to a linear case
394 # True if we need to fold p2 related state back to a linear case
395 fold_p2 = oldp2 != nullid and p2 == nullid
395 fold_p2 = oldp2 != nullid and p2 == nullid
396 return self._map.setparents(p1, p2, fold_p2=fold_p2)
396 return self._map.setparents(p1, p2, fold_p2=fold_p2)
397
397
398 def setbranch(self, branch):
398 def setbranch(self, branch):
399 self.__class__._branch.set(self, encoding.fromlocal(branch))
399 self.__class__._branch.set(self, encoding.fromlocal(branch))
400 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
400 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
401 try:
401 try:
402 f.write(self._branch + b'\n')
402 f.write(self._branch + b'\n')
403 f.close()
403 f.close()
404
404
405 # make sure filecache has the correct stat info for _branch after
405 # make sure filecache has the correct stat info for _branch after
406 # replacing the underlying file
406 # replacing the underlying file
407 ce = self._filecache[b'_branch']
407 ce = self._filecache[b'_branch']
408 if ce:
408 if ce:
409 ce.refresh()
409 ce.refresh()
410 except: # re-raises
410 except: # re-raises
411 f.discard()
411 f.discard()
412 raise
412 raise
413
413
414 def invalidate(self):
414 def invalidate(self):
415 """Causes the next access to reread the dirstate.
415 """Causes the next access to reread the dirstate.
416
416
417 This is different from localrepo.invalidatedirstate() because it always
417 This is different from localrepo.invalidatedirstate() because it always
418 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
418 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
419 check whether the dirstate has changed before rereading it."""
419 check whether the dirstate has changed before rereading it."""
420
420
421 for a in ("_map", "_branch", "_ignore"):
421 for a in ("_map", "_branch", "_ignore"):
422 if a in self.__dict__:
422 if a in self.__dict__:
423 delattr(self, a)
423 delattr(self, a)
424 self._lastnormaltime = 0
424 self._lastnormaltime = 0
425 self._dirty = False
425 self._dirty = False
426 self._parentwriters = 0
426 self._parentwriters = 0
427 self._origpl = None
427 self._origpl = None
428
428
429 def copy(self, source, dest):
429 def copy(self, source, dest):
430 """Mark dest as a copy of source. Unmark dest if source is None."""
430 """Mark dest as a copy of source. Unmark dest if source is None."""
431 if source == dest:
431 if source == dest:
432 return
432 return
433 self._dirty = True
433 self._dirty = True
434 if source is not None:
434 if source is not None:
435 self._map.copymap[dest] = source
435 self._map.copymap[dest] = source
436 else:
436 else:
437 self._map.copymap.pop(dest, None)
437 self._map.copymap.pop(dest, None)
438
438
439 def copied(self, file):
439 def copied(self, file):
440 return self._map.copymap.get(file, None)
440 return self._map.copymap.get(file, None)
441
441
442 def copies(self):
442 def copies(self):
443 return self._map.copymap
443 return self._map.copymap
444
444
445 @requires_no_parents_change
445 @requires_no_parents_change
446 def set_tracked(self, filename):
446 def set_tracked(self, filename):
447 """a "public" method for generic code to mark a file as tracked
447 """a "public" method for generic code to mark a file as tracked
448
448
449 This function is to be called outside of "update/merge" case. For
449 This function is to be called outside of "update/merge" case. For
450 example by a command like `hg add X`.
450 example by a command like `hg add X`.
451
451
452 return True the file was previously untracked, False otherwise.
452 return True the file was previously untracked, False otherwise.
453 """
453 """
454 self._dirty = True
454 self._dirty = True
455 entry = self._map.get(filename)
455 entry = self._map.get(filename)
456 if entry is None or not entry.tracked:
456 if entry is None or not entry.tracked:
457 self._check_new_tracked_filename(filename)
457 self._check_new_tracked_filename(filename)
458 return self._map.set_tracked(filename)
458 return self._map.set_tracked(filename)
459
459
460 @requires_no_parents_change
460 @requires_no_parents_change
461 def set_untracked(self, filename):
461 def set_untracked(self, filename):
462 """a "public" method for generic code to mark a file as untracked
462 """a "public" method for generic code to mark a file as untracked
463
463
464 This function is to be called outside of "update/merge" case. For
464 This function is to be called outside of "update/merge" case. For
465 example by a command like `hg remove X`.
465 example by a command like `hg remove X`.
466
466
467 return True the file was previously tracked, False otherwise.
467 return True the file was previously tracked, False otherwise.
468 """
468 """
469 ret = self._map.set_untracked(filename)
469 ret = self._map.set_untracked(filename)
470 if ret:
470 if ret:
471 self._dirty = True
471 self._dirty = True
472 return ret
472 return ret
473
473
474 @requires_no_parents_change
474 @requires_no_parents_change
475 def set_clean(self, filename, parentfiledata=None):
475 def set_clean(self, filename, parentfiledata=None):
476 """record that the current state of the file on disk is known to be clean"""
476 """record that the current state of the file on disk is known to be clean"""
477 self._dirty = True
477 self._dirty = True
478 if parentfiledata:
478 if parentfiledata:
479 (mode, size, mtime) = parentfiledata
479 (mode, size, mtime) = parentfiledata
480 else:
480 else:
481 (mode, size, mtime) = self._get_filedata(filename)
481 (mode, size, mtime) = self._get_filedata(filename)
482 if not self._map[filename].tracked:
482 if not self._map[filename].tracked:
483 self._check_new_tracked_filename(filename)
483 self._check_new_tracked_filename(filename)
484 self._map.set_clean(filename, mode, size, mtime)
484 self._map.set_clean(filename, mode, size, mtime)
485 if mtime > self._lastnormaltime:
485 if mtime > self._lastnormaltime:
486 # Remember the most recent modification timeslot for status(),
486 # Remember the most recent modification timeslot for status(),
487 # to make sure we won't miss future size-preserving file content
487 # to make sure we won't miss future size-preserving file content
488 # modifications that happen within the same timeslot.
488 # modifications that happen within the same timeslot.
489 self._lastnormaltime = mtime
489 self._lastnormaltime = mtime
490
490
491 @requires_no_parents_change
491 @requires_no_parents_change
492 def set_possibly_dirty(self, filename):
492 def set_possibly_dirty(self, filename):
493 """record that the current state of the file on disk is unknown"""
493 """record that the current state of the file on disk is unknown"""
494 self._dirty = True
494 self._dirty = True
495 self._map.set_possibly_dirty(filename)
495 self._map.set_possibly_dirty(filename)
496
496
497 @requires_parents_change
497 @requires_parents_change
498 def update_file_p1(
498 def update_file_p1(
499 self,
499 self,
500 filename,
500 filename,
501 p1_tracked,
501 p1_tracked,
502 ):
502 ):
503 """Set a file as tracked in the parent (or not)
503 """Set a file as tracked in the parent (or not)
504
504
505 This is to be called when adjust the dirstate to a new parent after an history
505 This is to be called when adjust the dirstate to a new parent after an history
506 rewriting operation.
506 rewriting operation.
507
507
508 It should not be called during a merge (p2 != nullid) and only within
508 It should not be called during a merge (p2 != nullid) and only within
509 a `with dirstate.parentchange():` context.
509 a `with dirstate.parentchange():` context.
510 """
510 """
511 if self.in_merge:
511 if self.in_merge:
512 msg = b'update_file_reference should not be called when merging'
512 msg = b'update_file_reference should not be called when merging'
513 raise error.ProgrammingError(msg)
513 raise error.ProgrammingError(msg)
514 entry = self._map.get(filename)
514 entry = self._map.get(filename)
515 if entry is None:
515 if entry is None:
516 wc_tracked = False
516 wc_tracked = False
517 else:
517 else:
518 wc_tracked = entry.tracked
518 wc_tracked = entry.tracked
519 if not (p1_tracked or wc_tracked):
519 if not (p1_tracked or wc_tracked):
520 # the file is no longer relevant to anyone
520 # the file is no longer relevant to anyone
521 if self._map.get(filename) is not None:
521 if self._map.get(filename) is not None:
522 self._map.reset_state(filename)
522 self._map.reset_state(filename)
523 self._dirty = True
523 self._dirty = True
524 elif (not p1_tracked) and wc_tracked:
524 elif (not p1_tracked) and wc_tracked:
525 if entry is not None and entry.added:
525 if entry is not None and entry.added:
526 return # avoid dropping copy information (maybe?)
526 return # avoid dropping copy information (maybe?)
527
527
528 parentfiledata = None
528 parentfiledata = None
529 if wc_tracked and p1_tracked:
529 if wc_tracked and p1_tracked:
530 parentfiledata = self._get_filedata(filename)
530 parentfiledata = self._get_filedata(filename)
531
531
532 self._map.reset_state(
532 self._map.reset_state(
533 filename,
533 filename,
534 wc_tracked,
534 wc_tracked,
535 p1_tracked,
535 p1_tracked,
536 # the underlying reference might have changed, we will have to
536 # the underlying reference might have changed, we will have to
537 # check it.
537 # check it.
538 has_meaningful_mtime=False,
538 has_meaningful_mtime=False,
539 parentfiledata=parentfiledata,
539 parentfiledata=parentfiledata,
540 )
540 )
541 if (
541 if (
542 parentfiledata is not None
542 parentfiledata is not None
543 and parentfiledata[2] > self._lastnormaltime
543 and parentfiledata[2] > self._lastnormaltime
544 ):
544 ):
545 # Remember the most recent modification timeslot for status(),
545 # Remember the most recent modification timeslot for status(),
546 # to make sure we won't miss future size-preserving file content
546 # to make sure we won't miss future size-preserving file content
547 # modifications that happen within the same timeslot.
547 # modifications that happen within the same timeslot.
548 self._lastnormaltime = parentfiledata[2]
548 self._lastnormaltime = parentfiledata[2]
549
549
550 @requires_parents_change
550 @requires_parents_change
551 def update_file(
551 def update_file(
552 self,
552 self,
553 filename,
553 filename,
554 wc_tracked,
554 wc_tracked,
555 p1_tracked,
555 p1_tracked,
556 p2_info=False,
556 p2_info=False,
557 possibly_dirty=False,
557 possibly_dirty=False,
558 parentfiledata=None,
558 parentfiledata=None,
559 ):
559 ):
560 """update the information about a file in the dirstate
560 """update the information about a file in the dirstate
561
561
562 This is to be called when the direstates parent changes to keep track
562 This is to be called when the direstates parent changes to keep track
563 of what is the file situation in regards to the working copy and its parent.
563 of what is the file situation in regards to the working copy and its parent.
564
564
565 This function must be called within a `dirstate.parentchange` context.
565 This function must be called within a `dirstate.parentchange` context.
566
566
567 note: the API is at an early stage and we might need to adjust it
567 note: the API is at an early stage and we might need to adjust it
568 depending of what information ends up being relevant and useful to
568 depending of what information ends up being relevant and useful to
569 other processing.
569 other processing.
570 """
570 """
571
571
572 # note: I do not think we need to double check name clash here since we
572 # note: I do not think we need to double check name clash here since we
573 # are in a update/merge case that should already have taken care of
573 # are in a update/merge case that should already have taken care of
574 # this. The test agrees
574 # this. The test agrees
575
575
576 self._dirty = True
576 self._dirty = True
577
577
578 need_parent_file_data = (
578 need_parent_file_data = (
579 not possibly_dirty and not p2_info and wc_tracked and p1_tracked
579 not possibly_dirty and not p2_info and wc_tracked and p1_tracked
580 )
580 )
581
581
582 # this mean we are doing call for file we do not really care about the
582 if need_parent_file_data and parentfiledata is None:
583 # data (eg: added or removed), however this should be a minor overhead
584 # compared to the overall update process calling this.
585 if need_parent_file_data or parentfiledata is None:
586 parentfiledata = self._get_filedata(filename)
583 parentfiledata = self._get_filedata(filename)
587
584
588 self._map.reset_state(
585 self._map.reset_state(
589 filename,
586 filename,
590 wc_tracked,
587 wc_tracked,
591 p1_tracked,
588 p1_tracked,
592 p2_info=p2_info,
589 p2_info=p2_info,
593 has_meaningful_mtime=not possibly_dirty,
590 has_meaningful_mtime=not possibly_dirty,
594 parentfiledata=parentfiledata,
591 parentfiledata=parentfiledata,
595 )
592 )
596 if (
593 if (
597 parentfiledata is not None
594 parentfiledata is not None
598 and parentfiledata[2] > self._lastnormaltime
595 and parentfiledata[2] > self._lastnormaltime
599 ):
596 ):
600 # Remember the most recent modification timeslot for status(),
597 # Remember the most recent modification timeslot for status(),
601 # to make sure we won't miss future size-preserving file content
598 # to make sure we won't miss future size-preserving file content
602 # modifications that happen within the same timeslot.
599 # modifications that happen within the same timeslot.
603 self._lastnormaltime = parentfiledata[2]
600 self._lastnormaltime = parentfiledata[2]
604
601
605 def _check_new_tracked_filename(self, filename):
602 def _check_new_tracked_filename(self, filename):
606 scmutil.checkfilename(filename)
603 scmutil.checkfilename(filename)
607 if self._map.hastrackeddir(filename):
604 if self._map.hastrackeddir(filename):
608 msg = _(b'directory %r already in dirstate')
605 msg = _(b'directory %r already in dirstate')
609 msg %= pycompat.bytestr(filename)
606 msg %= pycompat.bytestr(filename)
610 raise error.Abort(msg)
607 raise error.Abort(msg)
611 # shadows
608 # shadows
612 for d in pathutil.finddirs(filename):
609 for d in pathutil.finddirs(filename):
613 if self._map.hastrackeddir(d):
610 if self._map.hastrackeddir(d):
614 break
611 break
615 entry = self._map.get(d)
612 entry = self._map.get(d)
616 if entry is not None and not entry.removed:
613 if entry is not None and not entry.removed:
617 msg = _(b'file %r in dirstate clashes with %r')
614 msg = _(b'file %r in dirstate clashes with %r')
618 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
615 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
619 raise error.Abort(msg)
616 raise error.Abort(msg)
620
617
621 def _get_filedata(self, filename):
618 def _get_filedata(self, filename):
622 """returns"""
619 """returns"""
623 s = os.lstat(self._join(filename))
620 s = os.lstat(self._join(filename))
624 mode = s.st_mode
621 mode = s.st_mode
625 size = s.st_size
622 size = s.st_size
626 mtime = s[stat.ST_MTIME]
623 mtime = s[stat.ST_MTIME]
627 return (mode, size, mtime)
624 return (mode, size, mtime)
628
625
629 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
626 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
630 if exists is None:
627 if exists is None:
631 exists = os.path.lexists(os.path.join(self._root, path))
628 exists = os.path.lexists(os.path.join(self._root, path))
632 if not exists:
629 if not exists:
633 # Maybe a path component exists
630 # Maybe a path component exists
634 if not ignoremissing and b'/' in path:
631 if not ignoremissing and b'/' in path:
635 d, f = path.rsplit(b'/', 1)
632 d, f = path.rsplit(b'/', 1)
636 d = self._normalize(d, False, ignoremissing, None)
633 d = self._normalize(d, False, ignoremissing, None)
637 folded = d + b"/" + f
634 folded = d + b"/" + f
638 else:
635 else:
639 # No path components, preserve original case
636 # No path components, preserve original case
640 folded = path
637 folded = path
641 else:
638 else:
642 # recursively normalize leading directory components
639 # recursively normalize leading directory components
643 # against dirstate
640 # against dirstate
644 if b'/' in normed:
641 if b'/' in normed:
645 d, f = normed.rsplit(b'/', 1)
642 d, f = normed.rsplit(b'/', 1)
646 d = self._normalize(d, False, ignoremissing, True)
643 d = self._normalize(d, False, ignoremissing, True)
647 r = self._root + b"/" + d
644 r = self._root + b"/" + d
648 folded = d + b"/" + util.fspath(f, r)
645 folded = d + b"/" + util.fspath(f, r)
649 else:
646 else:
650 folded = util.fspath(normed, self._root)
647 folded = util.fspath(normed, self._root)
651 storemap[normed] = folded
648 storemap[normed] = folded
652
649
653 return folded
650 return folded
654
651
655 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
652 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
656 normed = util.normcase(path)
653 normed = util.normcase(path)
657 folded = self._map.filefoldmap.get(normed, None)
654 folded = self._map.filefoldmap.get(normed, None)
658 if folded is None:
655 if folded is None:
659 if isknown:
656 if isknown:
660 folded = path
657 folded = path
661 else:
658 else:
662 folded = self._discoverpath(
659 folded = self._discoverpath(
663 path, normed, ignoremissing, exists, self._map.filefoldmap
660 path, normed, ignoremissing, exists, self._map.filefoldmap
664 )
661 )
665 return folded
662 return folded
666
663
667 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
664 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
668 normed = util.normcase(path)
665 normed = util.normcase(path)
669 folded = self._map.filefoldmap.get(normed, None)
666 folded = self._map.filefoldmap.get(normed, None)
670 if folded is None:
667 if folded is None:
671 folded = self._map.dirfoldmap.get(normed, None)
668 folded = self._map.dirfoldmap.get(normed, None)
672 if folded is None:
669 if folded is None:
673 if isknown:
670 if isknown:
674 folded = path
671 folded = path
675 else:
672 else:
676 # store discovered result in dirfoldmap so that future
673 # store discovered result in dirfoldmap so that future
677 # normalizefile calls don't start matching directories
674 # normalizefile calls don't start matching directories
678 folded = self._discoverpath(
675 folded = self._discoverpath(
679 path, normed, ignoremissing, exists, self._map.dirfoldmap
676 path, normed, ignoremissing, exists, self._map.dirfoldmap
680 )
677 )
681 return folded
678 return folded
682
679
683 def normalize(self, path, isknown=False, ignoremissing=False):
680 def normalize(self, path, isknown=False, ignoremissing=False):
684 """
681 """
685 normalize the case of a pathname when on a casefolding filesystem
682 normalize the case of a pathname when on a casefolding filesystem
686
683
687 isknown specifies whether the filename came from walking the
684 isknown specifies whether the filename came from walking the
688 disk, to avoid extra filesystem access.
685 disk, to avoid extra filesystem access.
689
686
690 If ignoremissing is True, missing path are returned
687 If ignoremissing is True, missing path are returned
691 unchanged. Otherwise, we try harder to normalize possibly
688 unchanged. Otherwise, we try harder to normalize possibly
692 existing path components.
689 existing path components.
693
690
694 The normalized case is determined based on the following precedence:
691 The normalized case is determined based on the following precedence:
695
692
696 - version of name already stored in the dirstate
693 - version of name already stored in the dirstate
697 - version of name stored on disk
694 - version of name stored on disk
698 - version provided via command arguments
695 - version provided via command arguments
699 """
696 """
700
697
701 if self._checkcase:
698 if self._checkcase:
702 return self._normalize(path, isknown, ignoremissing)
699 return self._normalize(path, isknown, ignoremissing)
703 return path
700 return path
704
701
705 def clear(self):
702 def clear(self):
706 self._map.clear()
703 self._map.clear()
707 self._lastnormaltime = 0
704 self._lastnormaltime = 0
708 self._dirty = True
705 self._dirty = True
709
706
710 def rebuild(self, parent, allfiles, changedfiles=None):
707 def rebuild(self, parent, allfiles, changedfiles=None):
711 if changedfiles is None:
708 if changedfiles is None:
712 # Rebuild entire dirstate
709 # Rebuild entire dirstate
713 to_lookup = allfiles
710 to_lookup = allfiles
714 to_drop = []
711 to_drop = []
715 lastnormaltime = self._lastnormaltime
712 lastnormaltime = self._lastnormaltime
716 self.clear()
713 self.clear()
717 self._lastnormaltime = lastnormaltime
714 self._lastnormaltime = lastnormaltime
718 elif len(changedfiles) < 10:
715 elif len(changedfiles) < 10:
719 # Avoid turning allfiles into a set, which can be expensive if it's
716 # Avoid turning allfiles into a set, which can be expensive if it's
720 # large.
717 # large.
721 to_lookup = []
718 to_lookup = []
722 to_drop = []
719 to_drop = []
723 for f in changedfiles:
720 for f in changedfiles:
724 if f in allfiles:
721 if f in allfiles:
725 to_lookup.append(f)
722 to_lookup.append(f)
726 else:
723 else:
727 to_drop.append(f)
724 to_drop.append(f)
728 else:
725 else:
729 changedfilesset = set(changedfiles)
726 changedfilesset = set(changedfiles)
730 to_lookup = changedfilesset & set(allfiles)
727 to_lookup = changedfilesset & set(allfiles)
731 to_drop = changedfilesset - to_lookup
728 to_drop = changedfilesset - to_lookup
732
729
733 if self._origpl is None:
730 if self._origpl is None:
734 self._origpl = self._pl
731 self._origpl = self._pl
735 self._map.setparents(parent, self._nodeconstants.nullid)
732 self._map.setparents(parent, self._nodeconstants.nullid)
736
733
737 for f in to_lookup:
734 for f in to_lookup:
738
735
739 if self.in_merge:
736 if self.in_merge:
740 self.set_tracked(f)
737 self.set_tracked(f)
741 else:
738 else:
742 self._map.reset_state(
739 self._map.reset_state(
743 f,
740 f,
744 wc_tracked=True,
741 wc_tracked=True,
745 p1_tracked=True,
742 p1_tracked=True,
746 )
743 )
747 for f in to_drop:
744 for f in to_drop:
748 self._map.reset_state(f)
745 self._map.reset_state(f)
749
746
750 self._dirty = True
747 self._dirty = True
751
748
752 def identity(self):
749 def identity(self):
753 """Return identity of dirstate itself to detect changing in storage
750 """Return identity of dirstate itself to detect changing in storage
754
751
755 If identity of previous dirstate is equal to this, writing
752 If identity of previous dirstate is equal to this, writing
756 changes based on the former dirstate out can keep consistency.
753 changes based on the former dirstate out can keep consistency.
757 """
754 """
758 return self._map.identity
755 return self._map.identity
759
756
760 def write(self, tr):
757 def write(self, tr):
761 if not self._dirty:
758 if not self._dirty:
762 return
759 return
763
760
764 filename = self._filename
761 filename = self._filename
765 if tr:
762 if tr:
766 # 'dirstate.write()' is not only for writing in-memory
763 # 'dirstate.write()' is not only for writing in-memory
767 # changes out, but also for dropping ambiguous timestamp.
764 # changes out, but also for dropping ambiguous timestamp.
768 # delayed writing re-raise "ambiguous timestamp issue".
765 # delayed writing re-raise "ambiguous timestamp issue".
769 # See also the wiki page below for detail:
766 # See also the wiki page below for detail:
770 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
767 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
771
768
772 # record when mtime start to be ambiguous
769 # record when mtime start to be ambiguous
773 now = _getfsnow(self._opener)
770 now = _getfsnow(self._opener)
774
771
775 # delay writing in-memory changes out
772 # delay writing in-memory changes out
776 tr.addfilegenerator(
773 tr.addfilegenerator(
777 b'dirstate',
774 b'dirstate',
778 (self._filename,),
775 (self._filename,),
779 lambda f: self._writedirstate(tr, f, now=now),
776 lambda f: self._writedirstate(tr, f, now=now),
780 location=b'plain',
777 location=b'plain',
781 )
778 )
782 return
779 return
783
780
784 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
781 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
785 self._writedirstate(tr, st)
782 self._writedirstate(tr, st)
786
783
787 def addparentchangecallback(self, category, callback):
784 def addparentchangecallback(self, category, callback):
788 """add a callback to be called when the wd parents are changed
785 """add a callback to be called when the wd parents are changed
789
786
790 Callback will be called with the following arguments:
787 Callback will be called with the following arguments:
791 dirstate, (oldp1, oldp2), (newp1, newp2)
788 dirstate, (oldp1, oldp2), (newp1, newp2)
792
789
793 Category is a unique identifier to allow overwriting an old callback
790 Category is a unique identifier to allow overwriting an old callback
794 with a newer callback.
791 with a newer callback.
795 """
792 """
796 self._plchangecallbacks[category] = callback
793 self._plchangecallbacks[category] = callback
797
794
798 def _writedirstate(self, tr, st, now=None):
795 def _writedirstate(self, tr, st, now=None):
799 # notify callbacks about parents change
796 # notify callbacks about parents change
800 if self._origpl is not None and self._origpl != self._pl:
797 if self._origpl is not None and self._origpl != self._pl:
801 for c, callback in sorted(
798 for c, callback in sorted(
802 pycompat.iteritems(self._plchangecallbacks)
799 pycompat.iteritems(self._plchangecallbacks)
803 ):
800 ):
804 callback(self, self._origpl, self._pl)
801 callback(self, self._origpl, self._pl)
805 self._origpl = None
802 self._origpl = None
806
803
807 if now is None:
804 if now is None:
808 # use the modification time of the newly created temporary file as the
805 # use the modification time of the newly created temporary file as the
809 # filesystem's notion of 'now'
806 # filesystem's notion of 'now'
810 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
807 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
811
808
812 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
809 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
813 # timestamp of each entries in dirstate, because of 'now > mtime'
810 # timestamp of each entries in dirstate, because of 'now > mtime'
814 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
811 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
815 if delaywrite > 0:
812 if delaywrite > 0:
816 # do we have any files to delay for?
813 # do we have any files to delay for?
817 for f, e in pycompat.iteritems(self._map):
814 for f, e in pycompat.iteritems(self._map):
818 if e.need_delay(now):
815 if e.need_delay(now):
819 import time # to avoid useless import
816 import time # to avoid useless import
820
817
821 # rather than sleep n seconds, sleep until the next
818 # rather than sleep n seconds, sleep until the next
822 # multiple of n seconds
819 # multiple of n seconds
823 clock = time.time()
820 clock = time.time()
824 start = int(clock) - (int(clock) % delaywrite)
821 start = int(clock) - (int(clock) % delaywrite)
825 end = start + delaywrite
822 end = start + delaywrite
826 time.sleep(end - clock)
823 time.sleep(end - clock)
827 now = end # trust our estimate that the end is near now
824 now = end # trust our estimate that the end is near now
828 break
825 break
829
826
830 self._map.write(tr, st, now)
827 self._map.write(tr, st, now)
831 self._lastnormaltime = 0
828 self._lastnormaltime = 0
832 self._dirty = False
829 self._dirty = False
833
830
834 def _dirignore(self, f):
831 def _dirignore(self, f):
835 if self._ignore(f):
832 if self._ignore(f):
836 return True
833 return True
837 for p in pathutil.finddirs(f):
834 for p in pathutil.finddirs(f):
838 if self._ignore(p):
835 if self._ignore(p):
839 return True
836 return True
840 return False
837 return False
841
838
842 def _ignorefiles(self):
839 def _ignorefiles(self):
843 files = []
840 files = []
844 if os.path.exists(self._join(b'.hgignore')):
841 if os.path.exists(self._join(b'.hgignore')):
845 files.append(self._join(b'.hgignore'))
842 files.append(self._join(b'.hgignore'))
846 for name, path in self._ui.configitems(b"ui"):
843 for name, path in self._ui.configitems(b"ui"):
847 if name == b'ignore' or name.startswith(b'ignore.'):
844 if name == b'ignore' or name.startswith(b'ignore.'):
848 # we need to use os.path.join here rather than self._join
845 # we need to use os.path.join here rather than self._join
849 # because path is arbitrary and user-specified
846 # because path is arbitrary and user-specified
850 files.append(os.path.join(self._rootdir, util.expandpath(path)))
847 files.append(os.path.join(self._rootdir, util.expandpath(path)))
851 return files
848 return files
852
849
853 def _ignorefileandline(self, f):
850 def _ignorefileandline(self, f):
854 files = collections.deque(self._ignorefiles())
851 files = collections.deque(self._ignorefiles())
855 visited = set()
852 visited = set()
856 while files:
853 while files:
857 i = files.popleft()
854 i = files.popleft()
858 patterns = matchmod.readpatternfile(
855 patterns = matchmod.readpatternfile(
859 i, self._ui.warn, sourceinfo=True
856 i, self._ui.warn, sourceinfo=True
860 )
857 )
861 for pattern, lineno, line in patterns:
858 for pattern, lineno, line in patterns:
862 kind, p = matchmod._patsplit(pattern, b'glob')
859 kind, p = matchmod._patsplit(pattern, b'glob')
863 if kind == b"subinclude":
860 if kind == b"subinclude":
864 if p not in visited:
861 if p not in visited:
865 files.append(p)
862 files.append(p)
866 continue
863 continue
867 m = matchmod.match(
864 m = matchmod.match(
868 self._root, b'', [], [pattern], warn=self._ui.warn
865 self._root, b'', [], [pattern], warn=self._ui.warn
869 )
866 )
870 if m(f):
867 if m(f):
871 return (i, lineno, line)
868 return (i, lineno, line)
872 visited.add(i)
869 visited.add(i)
873 return (None, -1, b"")
870 return (None, -1, b"")
874
871
875 def _walkexplicit(self, match, subrepos):
872 def _walkexplicit(self, match, subrepos):
876 """Get stat data about the files explicitly specified by match.
873 """Get stat data about the files explicitly specified by match.
877
874
878 Return a triple (results, dirsfound, dirsnotfound).
875 Return a triple (results, dirsfound, dirsnotfound).
879 - results is a mapping from filename to stat result. It also contains
876 - results is a mapping from filename to stat result. It also contains
880 listings mapping subrepos and .hg to None.
877 listings mapping subrepos and .hg to None.
881 - dirsfound is a list of files found to be directories.
878 - dirsfound is a list of files found to be directories.
882 - dirsnotfound is a list of files that the dirstate thinks are
879 - dirsnotfound is a list of files that the dirstate thinks are
883 directories and that were not found."""
880 directories and that were not found."""
884
881
885 def badtype(mode):
882 def badtype(mode):
886 kind = _(b'unknown')
883 kind = _(b'unknown')
887 if stat.S_ISCHR(mode):
884 if stat.S_ISCHR(mode):
888 kind = _(b'character device')
885 kind = _(b'character device')
889 elif stat.S_ISBLK(mode):
886 elif stat.S_ISBLK(mode):
890 kind = _(b'block device')
887 kind = _(b'block device')
891 elif stat.S_ISFIFO(mode):
888 elif stat.S_ISFIFO(mode):
892 kind = _(b'fifo')
889 kind = _(b'fifo')
893 elif stat.S_ISSOCK(mode):
890 elif stat.S_ISSOCK(mode):
894 kind = _(b'socket')
891 kind = _(b'socket')
895 elif stat.S_ISDIR(mode):
892 elif stat.S_ISDIR(mode):
896 kind = _(b'directory')
893 kind = _(b'directory')
897 return _(b'unsupported file type (type is %s)') % kind
894 return _(b'unsupported file type (type is %s)') % kind
898
895
899 badfn = match.bad
896 badfn = match.bad
900 dmap = self._map
897 dmap = self._map
901 lstat = os.lstat
898 lstat = os.lstat
902 getkind = stat.S_IFMT
899 getkind = stat.S_IFMT
903 dirkind = stat.S_IFDIR
900 dirkind = stat.S_IFDIR
904 regkind = stat.S_IFREG
901 regkind = stat.S_IFREG
905 lnkkind = stat.S_IFLNK
902 lnkkind = stat.S_IFLNK
906 join = self._join
903 join = self._join
907 dirsfound = []
904 dirsfound = []
908 foundadd = dirsfound.append
905 foundadd = dirsfound.append
909 dirsnotfound = []
906 dirsnotfound = []
910 notfoundadd = dirsnotfound.append
907 notfoundadd = dirsnotfound.append
911
908
912 if not match.isexact() and self._checkcase:
909 if not match.isexact() and self._checkcase:
913 normalize = self._normalize
910 normalize = self._normalize
914 else:
911 else:
915 normalize = None
912 normalize = None
916
913
917 files = sorted(match.files())
914 files = sorted(match.files())
918 subrepos.sort()
915 subrepos.sort()
919 i, j = 0, 0
916 i, j = 0, 0
920 while i < len(files) and j < len(subrepos):
917 while i < len(files) and j < len(subrepos):
921 subpath = subrepos[j] + b"/"
918 subpath = subrepos[j] + b"/"
922 if files[i] < subpath:
919 if files[i] < subpath:
923 i += 1
920 i += 1
924 continue
921 continue
925 while i < len(files) and files[i].startswith(subpath):
922 while i < len(files) and files[i].startswith(subpath):
926 del files[i]
923 del files[i]
927 j += 1
924 j += 1
928
925
929 if not files or b'' in files:
926 if not files or b'' in files:
930 files = [b'']
927 files = [b'']
931 # constructing the foldmap is expensive, so don't do it for the
928 # constructing the foldmap is expensive, so don't do it for the
932 # common case where files is ['']
929 # common case where files is ['']
933 normalize = None
930 normalize = None
934 results = dict.fromkeys(subrepos)
931 results = dict.fromkeys(subrepos)
935 results[b'.hg'] = None
932 results[b'.hg'] = None
936
933
937 for ff in files:
934 for ff in files:
938 if normalize:
935 if normalize:
939 nf = normalize(ff, False, True)
936 nf = normalize(ff, False, True)
940 else:
937 else:
941 nf = ff
938 nf = ff
942 if nf in results:
939 if nf in results:
943 continue
940 continue
944
941
945 try:
942 try:
946 st = lstat(join(nf))
943 st = lstat(join(nf))
947 kind = getkind(st.st_mode)
944 kind = getkind(st.st_mode)
948 if kind == dirkind:
945 if kind == dirkind:
949 if nf in dmap:
946 if nf in dmap:
950 # file replaced by dir on disk but still in dirstate
947 # file replaced by dir on disk but still in dirstate
951 results[nf] = None
948 results[nf] = None
952 foundadd((nf, ff))
949 foundadd((nf, ff))
953 elif kind == regkind or kind == lnkkind:
950 elif kind == regkind or kind == lnkkind:
954 results[nf] = st
951 results[nf] = st
955 else:
952 else:
956 badfn(ff, badtype(kind))
953 badfn(ff, badtype(kind))
957 if nf in dmap:
954 if nf in dmap:
958 results[nf] = None
955 results[nf] = None
959 except OSError as inst: # nf not found on disk - it is dirstate only
956 except OSError as inst: # nf not found on disk - it is dirstate only
960 if nf in dmap: # does it exactly match a missing file?
957 if nf in dmap: # does it exactly match a missing file?
961 results[nf] = None
958 results[nf] = None
962 else: # does it match a missing directory?
959 else: # does it match a missing directory?
963 if self._map.hasdir(nf):
960 if self._map.hasdir(nf):
964 notfoundadd(nf)
961 notfoundadd(nf)
965 else:
962 else:
966 badfn(ff, encoding.strtolocal(inst.strerror))
963 badfn(ff, encoding.strtolocal(inst.strerror))
967
964
968 # match.files() may contain explicitly-specified paths that shouldn't
965 # match.files() may contain explicitly-specified paths that shouldn't
969 # be taken; drop them from the list of files found. dirsfound/notfound
966 # be taken; drop them from the list of files found. dirsfound/notfound
970 # aren't filtered here because they will be tested later.
967 # aren't filtered here because they will be tested later.
971 if match.anypats():
968 if match.anypats():
972 for f in list(results):
969 for f in list(results):
973 if f == b'.hg' or f in subrepos:
970 if f == b'.hg' or f in subrepos:
974 # keep sentinel to disable further out-of-repo walks
971 # keep sentinel to disable further out-of-repo walks
975 continue
972 continue
976 if not match(f):
973 if not match(f):
977 del results[f]
974 del results[f]
978
975
979 # Case insensitive filesystems cannot rely on lstat() failing to detect
976 # Case insensitive filesystems cannot rely on lstat() failing to detect
980 # a case-only rename. Prune the stat object for any file that does not
977 # a case-only rename. Prune the stat object for any file that does not
981 # match the case in the filesystem, if there are multiple files that
978 # match the case in the filesystem, if there are multiple files that
982 # normalize to the same path.
979 # normalize to the same path.
983 if match.isexact() and self._checkcase:
980 if match.isexact() and self._checkcase:
984 normed = {}
981 normed = {}
985
982
986 for f, st in pycompat.iteritems(results):
983 for f, st in pycompat.iteritems(results):
987 if st is None:
984 if st is None:
988 continue
985 continue
989
986
990 nc = util.normcase(f)
987 nc = util.normcase(f)
991 paths = normed.get(nc)
988 paths = normed.get(nc)
992
989
993 if paths is None:
990 if paths is None:
994 paths = set()
991 paths = set()
995 normed[nc] = paths
992 normed[nc] = paths
996
993
997 paths.add(f)
994 paths.add(f)
998
995
999 for norm, paths in pycompat.iteritems(normed):
996 for norm, paths in pycompat.iteritems(normed):
1000 if len(paths) > 1:
997 if len(paths) > 1:
1001 for path in paths:
998 for path in paths:
1002 folded = self._discoverpath(
999 folded = self._discoverpath(
1003 path, norm, True, None, self._map.dirfoldmap
1000 path, norm, True, None, self._map.dirfoldmap
1004 )
1001 )
1005 if path != folded:
1002 if path != folded:
1006 results[path] = None
1003 results[path] = None
1007
1004
1008 return results, dirsfound, dirsnotfound
1005 return results, dirsfound, dirsnotfound
1009
1006
1010 def walk(self, match, subrepos, unknown, ignored, full=True):
1007 def walk(self, match, subrepos, unknown, ignored, full=True):
1011 """
1008 """
1012 Walk recursively through the directory tree, finding all files
1009 Walk recursively through the directory tree, finding all files
1013 matched by match.
1010 matched by match.
1014
1011
1015 If full is False, maybe skip some known-clean files.
1012 If full is False, maybe skip some known-clean files.
1016
1013
1017 Return a dict mapping filename to stat-like object (either
1014 Return a dict mapping filename to stat-like object (either
1018 mercurial.osutil.stat instance or return value of os.stat()).
1015 mercurial.osutil.stat instance or return value of os.stat()).
1019
1016
1020 """
1017 """
1021 # full is a flag that extensions that hook into walk can use -- this
1018 # full is a flag that extensions that hook into walk can use -- this
1022 # implementation doesn't use it at all. This satisfies the contract
1019 # implementation doesn't use it at all. This satisfies the contract
1023 # because we only guarantee a "maybe".
1020 # because we only guarantee a "maybe".
1024
1021
1025 if ignored:
1022 if ignored:
1026 ignore = util.never
1023 ignore = util.never
1027 dirignore = util.never
1024 dirignore = util.never
1028 elif unknown:
1025 elif unknown:
1029 ignore = self._ignore
1026 ignore = self._ignore
1030 dirignore = self._dirignore
1027 dirignore = self._dirignore
1031 else:
1028 else:
1032 # if not unknown and not ignored, drop dir recursion and step 2
1029 # if not unknown and not ignored, drop dir recursion and step 2
1033 ignore = util.always
1030 ignore = util.always
1034 dirignore = util.always
1031 dirignore = util.always
1035
1032
1036 matchfn = match.matchfn
1033 matchfn = match.matchfn
1037 matchalways = match.always()
1034 matchalways = match.always()
1038 matchtdir = match.traversedir
1035 matchtdir = match.traversedir
1039 dmap = self._map
1036 dmap = self._map
1040 listdir = util.listdir
1037 listdir = util.listdir
1041 lstat = os.lstat
1038 lstat = os.lstat
1042 dirkind = stat.S_IFDIR
1039 dirkind = stat.S_IFDIR
1043 regkind = stat.S_IFREG
1040 regkind = stat.S_IFREG
1044 lnkkind = stat.S_IFLNK
1041 lnkkind = stat.S_IFLNK
1045 join = self._join
1042 join = self._join
1046
1043
1047 exact = skipstep3 = False
1044 exact = skipstep3 = False
1048 if match.isexact(): # match.exact
1045 if match.isexact(): # match.exact
1049 exact = True
1046 exact = True
1050 dirignore = util.always # skip step 2
1047 dirignore = util.always # skip step 2
1051 elif match.prefix(): # match.match, no patterns
1048 elif match.prefix(): # match.match, no patterns
1052 skipstep3 = True
1049 skipstep3 = True
1053
1050
1054 if not exact and self._checkcase:
1051 if not exact and self._checkcase:
1055 normalize = self._normalize
1052 normalize = self._normalize
1056 normalizefile = self._normalizefile
1053 normalizefile = self._normalizefile
1057 skipstep3 = False
1054 skipstep3 = False
1058 else:
1055 else:
1059 normalize = self._normalize
1056 normalize = self._normalize
1060 normalizefile = None
1057 normalizefile = None
1061
1058
1062 # step 1: find all explicit files
1059 # step 1: find all explicit files
1063 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1060 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1064 if matchtdir:
1061 if matchtdir:
1065 for d in work:
1062 for d in work:
1066 matchtdir(d[0])
1063 matchtdir(d[0])
1067 for d in dirsnotfound:
1064 for d in dirsnotfound:
1068 matchtdir(d)
1065 matchtdir(d)
1069
1066
1070 skipstep3 = skipstep3 and not (work or dirsnotfound)
1067 skipstep3 = skipstep3 and not (work or dirsnotfound)
1071 work = [d for d in work if not dirignore(d[0])]
1068 work = [d for d in work if not dirignore(d[0])]
1072
1069
1073 # step 2: visit subdirectories
1070 # step 2: visit subdirectories
1074 def traverse(work, alreadynormed):
1071 def traverse(work, alreadynormed):
1075 wadd = work.append
1072 wadd = work.append
1076 while work:
1073 while work:
1077 tracing.counter('dirstate.walk work', len(work))
1074 tracing.counter('dirstate.walk work', len(work))
1078 nd = work.pop()
1075 nd = work.pop()
1079 visitentries = match.visitchildrenset(nd)
1076 visitentries = match.visitchildrenset(nd)
1080 if not visitentries:
1077 if not visitentries:
1081 continue
1078 continue
1082 if visitentries == b'this' or visitentries == b'all':
1079 if visitentries == b'this' or visitentries == b'all':
1083 visitentries = None
1080 visitentries = None
1084 skip = None
1081 skip = None
1085 if nd != b'':
1082 if nd != b'':
1086 skip = b'.hg'
1083 skip = b'.hg'
1087 try:
1084 try:
1088 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1085 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1089 entries = listdir(join(nd), stat=True, skip=skip)
1086 entries = listdir(join(nd), stat=True, skip=skip)
1090 except OSError as inst:
1087 except OSError as inst:
1091 if inst.errno in (errno.EACCES, errno.ENOENT):
1088 if inst.errno in (errno.EACCES, errno.ENOENT):
1092 match.bad(
1089 match.bad(
1093 self.pathto(nd), encoding.strtolocal(inst.strerror)
1090 self.pathto(nd), encoding.strtolocal(inst.strerror)
1094 )
1091 )
1095 continue
1092 continue
1096 raise
1093 raise
1097 for f, kind, st in entries:
1094 for f, kind, st in entries:
1098 # Some matchers may return files in the visitentries set,
1095 # Some matchers may return files in the visitentries set,
1099 # instead of 'this', if the matcher explicitly mentions them
1096 # instead of 'this', if the matcher explicitly mentions them
1100 # and is not an exactmatcher. This is acceptable; we do not
1097 # and is not an exactmatcher. This is acceptable; we do not
1101 # make any hard assumptions about file-or-directory below
1098 # make any hard assumptions about file-or-directory below
1102 # based on the presence of `f` in visitentries. If
1099 # based on the presence of `f` in visitentries. If
1103 # visitchildrenset returned a set, we can always skip the
1100 # visitchildrenset returned a set, we can always skip the
1104 # entries *not* in the set it provided regardless of whether
1101 # entries *not* in the set it provided regardless of whether
1105 # they're actually a file or a directory.
1102 # they're actually a file or a directory.
1106 if visitentries and f not in visitentries:
1103 if visitentries and f not in visitentries:
1107 continue
1104 continue
1108 if normalizefile:
1105 if normalizefile:
1109 # even though f might be a directory, we're only
1106 # even though f might be a directory, we're only
1110 # interested in comparing it to files currently in the
1107 # interested in comparing it to files currently in the
1111 # dmap -- therefore normalizefile is enough
1108 # dmap -- therefore normalizefile is enough
1112 nf = normalizefile(
1109 nf = normalizefile(
1113 nd and (nd + b"/" + f) or f, True, True
1110 nd and (nd + b"/" + f) or f, True, True
1114 )
1111 )
1115 else:
1112 else:
1116 nf = nd and (nd + b"/" + f) or f
1113 nf = nd and (nd + b"/" + f) or f
1117 if nf not in results:
1114 if nf not in results:
1118 if kind == dirkind:
1115 if kind == dirkind:
1119 if not ignore(nf):
1116 if not ignore(nf):
1120 if matchtdir:
1117 if matchtdir:
1121 matchtdir(nf)
1118 matchtdir(nf)
1122 wadd(nf)
1119 wadd(nf)
1123 if nf in dmap and (matchalways or matchfn(nf)):
1120 if nf in dmap and (matchalways or matchfn(nf)):
1124 results[nf] = None
1121 results[nf] = None
1125 elif kind == regkind or kind == lnkkind:
1122 elif kind == regkind or kind == lnkkind:
1126 if nf in dmap:
1123 if nf in dmap:
1127 if matchalways or matchfn(nf):
1124 if matchalways or matchfn(nf):
1128 results[nf] = st
1125 results[nf] = st
1129 elif (matchalways or matchfn(nf)) and not ignore(
1126 elif (matchalways or matchfn(nf)) and not ignore(
1130 nf
1127 nf
1131 ):
1128 ):
1132 # unknown file -- normalize if necessary
1129 # unknown file -- normalize if necessary
1133 if not alreadynormed:
1130 if not alreadynormed:
1134 nf = normalize(nf, False, True)
1131 nf = normalize(nf, False, True)
1135 results[nf] = st
1132 results[nf] = st
1136 elif nf in dmap and (matchalways or matchfn(nf)):
1133 elif nf in dmap and (matchalways or matchfn(nf)):
1137 results[nf] = None
1134 results[nf] = None
1138
1135
1139 for nd, d in work:
1136 for nd, d in work:
1140 # alreadynormed means that processwork doesn't have to do any
1137 # alreadynormed means that processwork doesn't have to do any
1141 # expensive directory normalization
1138 # expensive directory normalization
1142 alreadynormed = not normalize or nd == d
1139 alreadynormed = not normalize or nd == d
1143 traverse([d], alreadynormed)
1140 traverse([d], alreadynormed)
1144
1141
1145 for s in subrepos:
1142 for s in subrepos:
1146 del results[s]
1143 del results[s]
1147 del results[b'.hg']
1144 del results[b'.hg']
1148
1145
1149 # step 3: visit remaining files from dmap
1146 # step 3: visit remaining files from dmap
1150 if not skipstep3 and not exact:
1147 if not skipstep3 and not exact:
1151 # If a dmap file is not in results yet, it was either
1148 # If a dmap file is not in results yet, it was either
1152 # a) not matching matchfn b) ignored, c) missing, or d) under a
1149 # a) not matching matchfn b) ignored, c) missing, or d) under a
1153 # symlink directory.
1150 # symlink directory.
1154 if not results and matchalways:
1151 if not results and matchalways:
1155 visit = [f for f in dmap]
1152 visit = [f for f in dmap]
1156 else:
1153 else:
1157 visit = [f for f in dmap if f not in results and matchfn(f)]
1154 visit = [f for f in dmap if f not in results and matchfn(f)]
1158 visit.sort()
1155 visit.sort()
1159
1156
1160 if unknown:
1157 if unknown:
1161 # unknown == True means we walked all dirs under the roots
1158 # unknown == True means we walked all dirs under the roots
1162 # that wasn't ignored, and everything that matched was stat'ed
1159 # that wasn't ignored, and everything that matched was stat'ed
1163 # and is already in results.
1160 # and is already in results.
1164 # The rest must thus be ignored or under a symlink.
1161 # The rest must thus be ignored or under a symlink.
1165 audit_path = pathutil.pathauditor(self._root, cached=True)
1162 audit_path = pathutil.pathauditor(self._root, cached=True)
1166
1163
1167 for nf in iter(visit):
1164 for nf in iter(visit):
1168 # If a stat for the same file was already added with a
1165 # If a stat for the same file was already added with a
1169 # different case, don't add one for this, since that would
1166 # different case, don't add one for this, since that would
1170 # make it appear as if the file exists under both names
1167 # make it appear as if the file exists under both names
1171 # on disk.
1168 # on disk.
1172 if (
1169 if (
1173 normalizefile
1170 normalizefile
1174 and normalizefile(nf, True, True) in results
1171 and normalizefile(nf, True, True) in results
1175 ):
1172 ):
1176 results[nf] = None
1173 results[nf] = None
1177 # Report ignored items in the dmap as long as they are not
1174 # Report ignored items in the dmap as long as they are not
1178 # under a symlink directory.
1175 # under a symlink directory.
1179 elif audit_path.check(nf):
1176 elif audit_path.check(nf):
1180 try:
1177 try:
1181 results[nf] = lstat(join(nf))
1178 results[nf] = lstat(join(nf))
1182 # file was just ignored, no links, and exists
1179 # file was just ignored, no links, and exists
1183 except OSError:
1180 except OSError:
1184 # file doesn't exist
1181 # file doesn't exist
1185 results[nf] = None
1182 results[nf] = None
1186 else:
1183 else:
1187 # It's either missing or under a symlink directory
1184 # It's either missing or under a symlink directory
1188 # which we in this case report as missing
1185 # which we in this case report as missing
1189 results[nf] = None
1186 results[nf] = None
1190 else:
1187 else:
1191 # We may not have walked the full directory tree above,
1188 # We may not have walked the full directory tree above,
1192 # so stat and check everything we missed.
1189 # so stat and check everything we missed.
1193 iv = iter(visit)
1190 iv = iter(visit)
1194 for st in util.statfiles([join(i) for i in visit]):
1191 for st in util.statfiles([join(i) for i in visit]):
1195 results[next(iv)] = st
1192 results[next(iv)] = st
1196 return results
1193 return results
1197
1194
def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
    """Run the status computation through the Rust extension.

    Delegates the whole walk/compare to ``rustmod.status`` and converts
    its result into the ``(lookup, scmutil.status)`` pair returned by
    ``status()``.  May raise ``rustmod.FallbackError`` (handled by the
    caller) when the Rust side cannot handle the request.
    """
    # Force Rayon (Rust parallelism library) to respect the number of
    # workers. This is a temporary workaround until Rust code knows
    # how to read the config file.
    numcpus = self._ui.configint(b"worker", b"numcpus")
    if numcpus is not None:
        encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

    workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
    if not workers_enabled:
        # A single Rayon thread effectively disables parallelism.
        encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

    (
        lookup,
        modified,
        added,
        removed,
        deleted,
        clean,
        ignored,
        unknown,
        warnings,
        bad,
        traversed,
        dirty,
    ) = rustmod.status(
        self._map._map,
        matcher,
        self._rootdir,
        self._ignorefiles(),
        self._checkexec,
        self._lastnormaltime,
        bool(list_clean),
        bool(list_ignored),
        bool(list_unknown),
        bool(matcher.traversedir),
    )

    # The Rust side may have mutated the on-disk map; remember that the
    # dirstate needs to be written out.
    self._dirty |= dirty

    if matcher.traversedir:
        for dir in traversed:
            matcher.traversedir(dir)

    if self._ui.warn:
        for item in warnings:
            if isinstance(item, tuple):
                # (file_path, syntax): invalid pattern syntax in an
                # ignore file.
                file_path, syntax = item
                msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                    file_path,
                    syntax,
                )
                self._ui.warn(msg)
            else:
                # bare item: an unreadable ignore/pattern file path
                msg = _(b"skipping unreadable pattern file '%s': %s\n")
                self._ui.warn(
                    msg
                    % (
                        pathutil.canonpath(
                            self._rootdir, self._rootdir, item
                        ),
                        b"No such file or directory",
                    )
                )

    # Report bad files (e.g. unreadable entries) through the matcher.
    for (fn, message) in bad:
        matcher.bad(fn, encoding.strtolocal(message))

    status = scmutil.status(
        modified=modified,
        added=added,
        removed=removed,
        deleted=deleted,
        unknown=unknown,
        ignored=ignored,
        clean=clean,
    )
    return (lookup, status)
1276
1273
def status(self, match, subrepos, ignored, clean, unknown):
    """Determine the status of the working copy relative to the
    dirstate and return a pair of (unsure, status), where status is of type
    scmutil.status and:

      unsure:
        files that might have been modified since the dirstate was
        written, but need to be read to be sure (size is the same
        but mtime differs)
      status.modified:
        files that have definitely been modified since the dirstate
        was written (different size or mode)
      status.clean:
        files that have definitely not been modified since the
        dirstate was written
    """
    # Keep the boolean request flags under list* names: the bare names
    # are reused below as the result-accumulator lists.
    listignored, listclean, listunknown = ignored, clean, unknown
    lookup, modified, added, unknown, ignored = [], [], [], [], []
    removed, deleted, clean = [], [], []

    dmap = self._map
    dmap.preload()

    use_rust = True

    allowed_matchers = (
        matchmod.alwaysmatcher,
        matchmod.exactmatcher,
        matchmod.includematcher,
    )

    if rustmod is None:
        use_rust = False
    elif self._checkcase:
        # Case-insensitive filesystems are not handled yet
        use_rust = False
    elif subrepos:
        use_rust = False
    elif sparse.enabled:
        use_rust = False
    elif not isinstance(match, allowed_matchers):
        # Some matchers have yet to be implemented
        use_rust = False

    if use_rust:
        try:
            return self._rust_status(
                match, listclean, listignored, listunknown
            )
        except rustmod.FallbackError:
            # Rust could not handle this request; fall through to the
            # pure-Python implementation below.
            pass

    def noop(f):
        pass

    # Bind frequently-used bound methods to locals: these run once per
    # walked file, so attribute-lookup cost matters here.
    dcontains = dmap.__contains__
    dget = dmap.__getitem__
    ladd = lookup.append  # aka "unsure"
    madd = modified.append
    aadd = added.append
    uadd = unknown.append if listunknown else noop
    iadd = ignored.append if listignored else noop
    radd = removed.append
    dadd = deleted.append
    cadd = clean.append if listclean else noop
    mexact = match.exact
    dirignore = self._dirignore
    checkexec = self._checkexec
    copymap = self._map.copymap
    lastnormaltime = self._lastnormaltime

    # We need to do full walks when either
    # - we're listing all clean files, or
    # - match.traversedir does something, because match.traversedir should
    #   be called for every dir in the working dir
    full = listclean or match.traversedir is not None
    for fn, st in pycompat.iteritems(
        self.walk(match, subrepos, listunknown, listignored, full=full)
    ):
        if not dcontains(fn):
            # File exists on disk but is not tracked: classify it as
            # ignored or unknown.
            if (listignored or mexact(fn)) and dirignore(fn):
                if listignored:
                    iadd(fn)
            else:
                uadd(fn)
            continue

        # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
        # written like that for performance reasons. dmap[fn] is not a
        # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
        # opcode has fast paths when the value to be unpacked is a tuple or
        # a list, but falls back to creating a full-fledged iterator in
        # general. That is much slower than simply accessing and storing the
        # tuple members one by one.
        t = dget(fn)
        mode = t.mode
        size = t.size
        time = t.mtime

        if not st and t.tracked:
            # tracked but absent from disk -> deleted
            dadd(fn)
        elif t.merged:
            madd(fn)
        elif t.added:
            aadd(fn)
        elif t.removed:
            radd(fn)
        elif t.tracked:
            # _rangemask is a module-level mask used to compare values
            # that were truncated when stored in the dirstate.
            if (
                size >= 0
                and (
                    (size != st.st_size and size != st.st_size & _rangemask)
                    or ((mode ^ st.st_mode) & 0o100 and checkexec)
                )
                or t.from_p2
                or fn in copymap
            ):
                if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                    # issue6456: Size returned may be longer due to
                    # encryption on EXT-4 fscrypt, undecided.
                    ladd(fn)
                else:
                    madd(fn)
            elif (
                time != st[stat.ST_MTIME]
                and time != st[stat.ST_MTIME] & _rangemask
            ):
                ladd(fn)
            elif st[stat.ST_MTIME] == lastnormaltime:
                # fn may have just been marked as normal and it may have
                # changed in the same second without changing its size.
                # This can happen if we quickly do multiple commits.
                # Force lookup, so we don't miss such a racy file change.
                ladd(fn)
            elif listclean:
                cadd(fn)
    status = scmutil.status(
        modified, added, removed, deleted, unknown, ignored, clean
    )
    return (lookup, status)
1417
1414
def matches(self, match):
    """
    return files in the dirstate (in whatever state) filtered by match
    """
    # With the Rust extension loaded, the inner map is the real container.
    entries = self._map._map if rustmod is not None else self._map

    if match.always():
        return entries.keys()
    wanted = match.files()
    if match.isexact():
        # fast path -- filter the other way around, since typically the
        # explicit file list is much smaller than the dirstate map
        return [f for f in wanted if f in entries]
    if match.prefix() and all(name in entries for name in wanted):
        # fast path -- all the values are known to be files, so just
        # return that
        return list(wanted)
    return [f for f in entries if match(f)]
1438
1435
1439 def _actualfilename(self, tr):
1436 def _actualfilename(self, tr):
1440 if tr:
1437 if tr:
1441 return self._pendingfilename
1438 return self._pendingfilename
1442 else:
1439 else:
1443 return self._filename
1440 return self._filename
1444
1441
def savebackup(self, tr, backupname):
    '''Save current dirstate into backup file

    *tr* is the active transaction (or None); it determines which file
    (pending or regular) is the live dirstate.  *backupname* is the
    name, relative to the opener, the backup is written under.
    '''
    filename = self._actualfilename(tr)
    # Backing the live file up onto itself would lose data.
    assert backupname != filename

    # use '_writedirstate' instead of 'write' to write changes certainly,
    # because the latter omits writing out if transaction is running.
    # output file will be used to create backup of dirstate at this point.
    if self._dirty or not self._opener.exists(filename):
        self._writedirstate(
            tr,
            self._opener(filename, b"w", atomictemp=True, checkambig=True),
        )

    if tr:
        # ensure that subsequent tr.writepending returns True for
        # changes written out above, even if dirstate is never
        # changed after this
        tr.addfilegenerator(
            b'dirstate',
            (self._filename,),
            lambda f: self._writedirstate(tr, f),
            location=b'plain',
        )

        # ensure that pending file written above is unlinked at
        # failure, even if tr.writepending isn't invoked until the
        # end of this transaction
        tr.registertmp(filename, location=b'plain')

    self._opener.tryunlink(backupname)
    # hardlink backup is okay because _writedirstate is always called
    # with an "atomictemp=True" file.
    util.copyfile(
        self._opener.join(filename),
        self._opener.join(backupname),
        hardlink=True,
    )
1483
1480
def restorebackup(self, tr, backupname):
    '''Restore dirstate by backup file'''
    # this "invalidate()" prevents "wlock.release()" from writing
    # changes of dirstate out after restoring from backup file
    self.invalidate()
    filename = self._actualfilename(tr)
    opener = self._opener
    backup_path = opener.join(backupname)
    live_path = opener.join(filename)
    if util.samefile(backup_path, live_path):
        # the backup is hardlinked to the live file; dropping the backup
        # name is enough
        opener.unlink(backupname)
    else:
        opener.rename(backupname, filename, checkambig=True)
1495
1492
def clearbackup(self, tr, backupname):
    '''Clear backup file'''
    # *tr* is unused; it is accepted for interface symmetry with
    # savebackup/restorebackup.
    opener = self._opener
    opener.unlink(backupname)
1499
1496
def verify(self, m1, m2):
    """check the dirstate content again the parent manifest and yield errors"""
    err_missing_p1 = b"%s in state %s, but not in manifest1\n"
    err_extra_p1 = b"%s in state %s, but also in manifest1\n"
    err_missing_both = b"%s in state %s, but not in either manifest\n"
    err_untracked = b"%s in manifest1, but listed as state %s\n"

    # Pass 1: every dirstate entry must be consistent with the manifests.
    for filename, entry in self.items():
        state = entry.state
        in_p1 = filename in m1
        if state in b"nr" and not in_p1:
            yield (err_missing_p1, filename, state)
        if state in b"a" and in_p1:
            yield (err_extra_p1, filename, state)
        if state in b"m" and not in_p1 and filename not in m2:
            yield (err_missing_both, filename, state)

    # Pass 2: every manifest1 file must be tracked in a sane state.
    for filename in m1:
        state = self.get_entry(filename).state
        if state not in b"nrm":
            yield (err_untracked, filename, state)
General Comments 0
You need to be logged in to leave comments. Login now