##// END OF EJS Templates
status: process `from_p2` file the same as `merged` one...
marmoute -
r48959:142e9f0a default
parent child Browse files
Show More
@@ -1,1514 +1,1513 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
# C implementation of the parsers, loaded via the policy mechanism
parsers = policy.importmod('parsers')
# Rust implementation of the dirstate, when available (None otherwise)
rustmod = policy.importrust('dirstate')

# dirstate-v2 support requires the Rust extensions
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# local aliases for frequently used helpers
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = dirstatemap.DirstateItem
49
49
50
50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # files tracked by this cache live under .hg/, so resolve the
        # name through the repository opener
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve the name relative to the working directory root
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator enforcing that ``func`` runs inside a parentchange context.

    Raises ``error.ProgrammingError`` when the dirstate is not currently in
    a pending parent change (see ``dirstate.parentchange``).
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            raise error.ProgrammingError(
                'calling `%s` outside of a parentchange context'
                % func.__name__
            )
        return func(self, *args, **kwargs)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator enforcing that ``func`` runs OUTSIDE a parentchange context.

    Raises ``error.ProgrammingError`` when the dirstate is currently in a
    pending parent change (see ``dirstate.parentchange``).
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            raise error.ProgrammingError(
                'calling `%s` inside of a parentchange context'
                % func.__name__
            )
        return func(self, *args, **kwargs)

    return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of currently open parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        # original parents, recorded before the first setparents() call
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
138
138
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching self._pl forces the dirstate map (and with it the
        # parents) to be read now
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0
168
168
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # self-replacing: assigning to self._map here overrides the
        # propertycache for subsequent accesses
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

    @repocache(b'branch')
    def _branch(self):
        # current branch name as stored in .hg/branch; a missing file
        # means the default branch
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b"default"

    @property
    def _pl(self):
        # the pair of working-directory parent nodes, from the dirstate map
        return self._map.parents()

    def hasdir(self, d):
        # delegate to the dirstate map's tracked-directory index
        return self._map.hastrackeddir(d)

    @rootcache(b'.hgignore')
    def _ignore(self):
        """Matcher built from the configured ignore files."""
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218
218
    @propertycache
    def _slash(self):
        # True when ui.slash is set and the native separator is not '/'
        # (i.e. paths should be displayed with forward slashes)
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # whether the repository filesystem supports symlinks
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # whether the repository filesystem supports the executable bit
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        # True when the filesystem is case-insensitive (probed on '.hg')
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
239
239
    def flagfunc(self, buildfallback):
        """Return a function mapping a path to its flags (b'l', b'x' or b'').

        When the filesystem supports both symlinks and the exec bit, the
        flags are read with a single lstat(); otherwise ``buildfallback()``
        supplies the half of the information the filesystem cannot provide.
        """
        if self._checklink and self._checkexec:
            # best case: everything can be read from the filesystem

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    # e.g. file vanished; report no flags
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks from the filesystem, exec bit from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # symlinks from the fallback, exec bit from the filesystem

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither is supported: everything comes from the fallback
            return fallback
279
279
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()

    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # cwd is inside the repository: strip the root prefix
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        """Return ``f`` expressed relative to ``cwd`` (default: getcwd()).

        Slashes are normalized to '/' when ui.slash is in effect.
        """
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path
315
315
316 def __getitem__(self, key):
316 def __getitem__(self, key):
317 """Return the current state of key (a filename) in the dirstate.
317 """Return the current state of key (a filename) in the dirstate.
318
318
319 States are:
319 States are:
320 n normal
320 n normal
321 m needs merging
321 m needs merging
322 r marked for removal
322 r marked for removal
323 a marked for addition
323 a marked for addition
324 ? not tracked
324 ? not tracked
325
325
326 XXX The "state" is a bit obscure to be in the "public" API. we should
326 XXX The "state" is a bit obscure to be in the "public" API. we should
327 consider migrating all user of this to going through the dirstate entry
327 consider migrating all user of this to going through the dirstate entry
328 instead.
328 instead.
329 """
329 """
330 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
330 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
331 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
331 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
332 entry = self._map.get(key)
332 entry = self._map.get(key)
333 if entry is not None:
333 if entry is not None:
334 return entry.state
334 return entry.state
335 return b'?'
335 return b'?'
336
336
337 def get_entry(self, path):
337 def get_entry(self, path):
338 """return a DirstateItem for the associated path"""
338 """return a DirstateItem for the associated path"""
339 entry = self._map.get(path)
339 entry = self._map.get(path)
340 if entry is None:
340 if entry is None:
341 return DirstateItem()
341 return DirstateItem()
342 return entry
342 return entry
343
343
344 def __contains__(self, key):
344 def __contains__(self, key):
345 return key in self._map
345 return key in self._map
346
346
347 def __iter__(self):
347 def __iter__(self):
348 return iter(sorted(self._map))
348 return iter(sorted(self._map))
349
349
350 def items(self):
350 def items(self):
351 return pycompat.iteritems(self._map)
351 return pycompat.iteritems(self._map)
352
352
353 iteritems = items
353 iteritems = items
354
354
    def parents(self):
        # both working-directory parents, run through the validate callback
        return [self._validate(p) for p in self._pl]

    def p1(self):
        # first working-directory parent
        return self._validate(self._pl[0])

    def p2(self):
        # second working-directory parent (nullid outside of a merge,
        # see in_merge below)
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        # branch name converted to the local encoding
        return encoding.tolocal(self._branch)
371
371
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for later restoration/reporting
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
397
397
    def setbranch(self, branch):
        """Record ``branch`` (in local encoding) as the current branch.

        The name is written to .hg/branch and the filecache entry for
        ``_branch`` is refreshed so it matches the new file.
        """
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # discard the partially written temp file before propagating
            f.discard()
            raise
413
413
414 def invalidate(self):
414 def invalidate(self):
415 """Causes the next access to reread the dirstate.
415 """Causes the next access to reread the dirstate.
416
416
417 This is different from localrepo.invalidatedirstate() because it always
417 This is different from localrepo.invalidatedirstate() because it always
418 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
418 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
419 check whether the dirstate has changed before rereading it."""
419 check whether the dirstate has changed before rereading it."""
420
420
421 for a in ("_map", "_branch", "_ignore"):
421 for a in ("_map", "_branch", "_ignore"):
422 if a in self.__dict__:
422 if a in self.__dict__:
423 delattr(self, a)
423 delattr(self, a)
424 self._lastnormaltime = 0
424 self._lastnormaltime = 0
425 self._dirty = False
425 self._dirty = False
426 self._parentwriters = 0
426 self._parentwriters = 0
427 self._origpl = None
427 self._origpl = None
428
428
429 def copy(self, source, dest):
429 def copy(self, source, dest):
430 """Mark dest as a copy of source. Unmark dest if source is None."""
430 """Mark dest as a copy of source. Unmark dest if source is None."""
431 if source == dest:
431 if source == dest:
432 return
432 return
433 self._dirty = True
433 self._dirty = True
434 if source is not None:
434 if source is not None:
435 self._map.copymap[dest] = source
435 self._map.copymap[dest] = source
436 else:
436 else:
437 self._map.copymap.pop(dest, None)
437 self._map.copymap.pop(dest, None)
438
438
439 def copied(self, file):
439 def copied(self, file):
440 return self._map.copymap.get(file, None)
440 return self._map.copymap.get(file, None)
441
441
442 def copies(self):
442 def copies(self):
443 return self._map.copymap
443 return self._map.copymap
444
444
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True if the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            # newly tracked file: check it against existing tracked names
            self._check_new_tracked_filename(filename)
        return self._map.set_tracked(filename)

    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True if the file was previously tracked, False otherwise.
        """
        ret = self._map.set_untracked(filename)
        if ret:
            # only mark dirty when something actually changed
            self._dirty = True
        return ret

    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # no cached stat data supplied: read it from disk
            (mode, size, mtime) = self._get_filedata(filename)
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        self._map.set_clean(filename, mode, size, mtime)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime

    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._map.set_possibly_dirty(filename)
496
496
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        # only gather fresh stat data when the file is clean relative to
        # both the working copy and the new parent
        parentfiledata = None
        if wc_tracked and p1_tracked:
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
549
549
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True

        # stat data is only meaningful for a file that is clean in the
        # working copy, present in p1 and not coming from p2
        need_parent_file_data = (
            not possibly_dirty and not p2_info and wc_tracked and p1_tracked
        )

        if need_parent_file_data and parentfiledata is None:
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
601
601
602 def _check_new_tracked_filename(self, filename):
602 def _check_new_tracked_filename(self, filename):
603 scmutil.checkfilename(filename)
603 scmutil.checkfilename(filename)
604 if self._map.hastrackeddir(filename):
604 if self._map.hastrackeddir(filename):
605 msg = _(b'directory %r already in dirstate')
605 msg = _(b'directory %r already in dirstate')
606 msg %= pycompat.bytestr(filename)
606 msg %= pycompat.bytestr(filename)
607 raise error.Abort(msg)
607 raise error.Abort(msg)
608 # shadows
608 # shadows
609 for d in pathutil.finddirs(filename):
609 for d in pathutil.finddirs(filename):
610 if self._map.hastrackeddir(d):
610 if self._map.hastrackeddir(d):
611 break
611 break
612 entry = self._map.get(d)
612 entry = self._map.get(d)
613 if entry is not None and not entry.removed:
613 if entry is not None and not entry.removed:
614 msg = _(b'file %r in dirstate clashes with %r')
614 msg = _(b'file %r in dirstate clashes with %r')
615 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
615 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
616 raise error.Abort(msg)
616 raise error.Abort(msg)
617
617
618 def _get_filedata(self, filename):
618 def _get_filedata(self, filename):
619 """returns"""
619 """returns"""
620 s = os.lstat(self._join(filename))
620 s = os.lstat(self._join(filename))
621 mode = s.st_mode
621 mode = s.st_mode
622 size = s.st_size
622 size = s.st_size
623 mtime = s[stat.ST_MTIME]
623 mtime = s[stat.ST_MTIME]
624 return (mode, size, mtime)
624 return (mode, size, mtime)
625
625
626 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
626 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
627 if exists is None:
627 if exists is None:
628 exists = os.path.lexists(os.path.join(self._root, path))
628 exists = os.path.lexists(os.path.join(self._root, path))
629 if not exists:
629 if not exists:
630 # Maybe a path component exists
630 # Maybe a path component exists
631 if not ignoremissing and b'/' in path:
631 if not ignoremissing and b'/' in path:
632 d, f = path.rsplit(b'/', 1)
632 d, f = path.rsplit(b'/', 1)
633 d = self._normalize(d, False, ignoremissing, None)
633 d = self._normalize(d, False, ignoremissing, None)
634 folded = d + b"/" + f
634 folded = d + b"/" + f
635 else:
635 else:
636 # No path components, preserve original case
636 # No path components, preserve original case
637 folded = path
637 folded = path
638 else:
638 else:
639 # recursively normalize leading directory components
639 # recursively normalize leading directory components
640 # against dirstate
640 # against dirstate
641 if b'/' in normed:
641 if b'/' in normed:
642 d, f = normed.rsplit(b'/', 1)
642 d, f = normed.rsplit(b'/', 1)
643 d = self._normalize(d, False, ignoremissing, True)
643 d = self._normalize(d, False, ignoremissing, True)
644 r = self._root + b"/" + d
644 r = self._root + b"/" + d
645 folded = d + b"/" + util.fspath(f, r)
645 folded = d + b"/" + util.fspath(f, r)
646 else:
646 else:
647 folded = util.fspath(normed, self._root)
647 folded = util.fspath(normed, self._root)
648 storemap[normed] = folded
648 storemap[normed] = folded
649
649
650 return folded
650 return folded
651
651
652 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
652 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
653 normed = util.normcase(path)
653 normed = util.normcase(path)
654 folded = self._map.filefoldmap.get(normed, None)
654 folded = self._map.filefoldmap.get(normed, None)
655 if folded is None:
655 if folded is None:
656 if isknown:
656 if isknown:
657 folded = path
657 folded = path
658 else:
658 else:
659 folded = self._discoverpath(
659 folded = self._discoverpath(
660 path, normed, ignoremissing, exists, self._map.filefoldmap
660 path, normed, ignoremissing, exists, self._map.filefoldmap
661 )
661 )
662 return folded
662 return folded
663
663
664 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
664 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
665 normed = util.normcase(path)
665 normed = util.normcase(path)
666 folded = self._map.filefoldmap.get(normed, None)
666 folded = self._map.filefoldmap.get(normed, None)
667 if folded is None:
667 if folded is None:
668 folded = self._map.dirfoldmap.get(normed, None)
668 folded = self._map.dirfoldmap.get(normed, None)
669 if folded is None:
669 if folded is None:
670 if isknown:
670 if isknown:
671 folded = path
671 folded = path
672 else:
672 else:
673 # store discovered result in dirfoldmap so that future
673 # store discovered result in dirfoldmap so that future
674 # normalizefile calls don't start matching directories
674 # normalizefile calls don't start matching directories
675 folded = self._discoverpath(
675 folded = self._discoverpath(
676 path, normed, ignoremissing, exists, self._map.dirfoldmap
676 path, normed, ignoremissing, exists, self._map.dirfoldmap
677 )
677 )
678 return folded
678 return folded
679
679
680 def normalize(self, path, isknown=False, ignoremissing=False):
680 def normalize(self, path, isknown=False, ignoremissing=False):
681 """
681 """
682 normalize the case of a pathname when on a casefolding filesystem
682 normalize the case of a pathname when on a casefolding filesystem
683
683
684 isknown specifies whether the filename came from walking the
684 isknown specifies whether the filename came from walking the
685 disk, to avoid extra filesystem access.
685 disk, to avoid extra filesystem access.
686
686
687 If ignoremissing is True, missing path are returned
687 If ignoremissing is True, missing path are returned
688 unchanged. Otherwise, we try harder to normalize possibly
688 unchanged. Otherwise, we try harder to normalize possibly
689 existing path components.
689 existing path components.
690
690
691 The normalized case is determined based on the following precedence:
691 The normalized case is determined based on the following precedence:
692
692
693 - version of name already stored in the dirstate
693 - version of name already stored in the dirstate
694 - version of name stored on disk
694 - version of name stored on disk
695 - version provided via command arguments
695 - version provided via command arguments
696 """
696 """
697
697
698 if self._checkcase:
698 if self._checkcase:
699 return self._normalize(path, isknown, ignoremissing)
699 return self._normalize(path, isknown, ignoremissing)
700 return path
700 return path
701
701
702 def clear(self):
702 def clear(self):
703 self._map.clear()
703 self._map.clear()
704 self._lastnormaltime = 0
704 self._lastnormaltime = 0
705 self._dirty = True
705 self._dirty = True
706
706
707 def rebuild(self, parent, allfiles, changedfiles=None):
707 def rebuild(self, parent, allfiles, changedfiles=None):
708 if changedfiles is None:
708 if changedfiles is None:
709 # Rebuild entire dirstate
709 # Rebuild entire dirstate
710 to_lookup = allfiles
710 to_lookup = allfiles
711 to_drop = []
711 to_drop = []
712 lastnormaltime = self._lastnormaltime
712 lastnormaltime = self._lastnormaltime
713 self.clear()
713 self.clear()
714 self._lastnormaltime = lastnormaltime
714 self._lastnormaltime = lastnormaltime
715 elif len(changedfiles) < 10:
715 elif len(changedfiles) < 10:
716 # Avoid turning allfiles into a set, which can be expensive if it's
716 # Avoid turning allfiles into a set, which can be expensive if it's
717 # large.
717 # large.
718 to_lookup = []
718 to_lookup = []
719 to_drop = []
719 to_drop = []
720 for f in changedfiles:
720 for f in changedfiles:
721 if f in allfiles:
721 if f in allfiles:
722 to_lookup.append(f)
722 to_lookup.append(f)
723 else:
723 else:
724 to_drop.append(f)
724 to_drop.append(f)
725 else:
725 else:
726 changedfilesset = set(changedfiles)
726 changedfilesset = set(changedfiles)
727 to_lookup = changedfilesset & set(allfiles)
727 to_lookup = changedfilesset & set(allfiles)
728 to_drop = changedfilesset - to_lookup
728 to_drop = changedfilesset - to_lookup
729
729
730 if self._origpl is None:
730 if self._origpl is None:
731 self._origpl = self._pl
731 self._origpl = self._pl
732 self._map.setparents(parent, self._nodeconstants.nullid)
732 self._map.setparents(parent, self._nodeconstants.nullid)
733
733
734 for f in to_lookup:
734 for f in to_lookup:
735
735
736 if self.in_merge:
736 if self.in_merge:
737 self.set_tracked(f)
737 self.set_tracked(f)
738 else:
738 else:
739 self._map.reset_state(
739 self._map.reset_state(
740 f,
740 f,
741 wc_tracked=True,
741 wc_tracked=True,
742 p1_tracked=True,
742 p1_tracked=True,
743 )
743 )
744 for f in to_drop:
744 for f in to_drop:
745 self._map.reset_state(f)
745 self._map.reset_state(f)
746
746
747 self._dirty = True
747 self._dirty = True
748
748
749 def identity(self):
749 def identity(self):
750 """Return identity of dirstate itself to detect changing in storage
750 """Return identity of dirstate itself to detect changing in storage
751
751
752 If identity of previous dirstate is equal to this, writing
752 If identity of previous dirstate is equal to this, writing
753 changes based on the former dirstate out can keep consistency.
753 changes based on the former dirstate out can keep consistency.
754 """
754 """
755 return self._map.identity
755 return self._map.identity
756
756
757 def write(self, tr):
757 def write(self, tr):
758 if not self._dirty:
758 if not self._dirty:
759 return
759 return
760
760
761 filename = self._filename
761 filename = self._filename
762 if tr:
762 if tr:
763 # 'dirstate.write()' is not only for writing in-memory
763 # 'dirstate.write()' is not only for writing in-memory
764 # changes out, but also for dropping ambiguous timestamp.
764 # changes out, but also for dropping ambiguous timestamp.
765 # delayed writing re-raise "ambiguous timestamp issue".
765 # delayed writing re-raise "ambiguous timestamp issue".
766 # See also the wiki page below for detail:
766 # See also the wiki page below for detail:
767 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
767 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
768
768
769 # record when mtime start to be ambiguous
769 # record when mtime start to be ambiguous
770 now = _getfsnow(self._opener)
770 now = _getfsnow(self._opener)
771
771
772 # delay writing in-memory changes out
772 # delay writing in-memory changes out
773 tr.addfilegenerator(
773 tr.addfilegenerator(
774 b'dirstate',
774 b'dirstate',
775 (self._filename,),
775 (self._filename,),
776 lambda f: self._writedirstate(tr, f, now=now),
776 lambda f: self._writedirstate(tr, f, now=now),
777 location=b'plain',
777 location=b'plain',
778 )
778 )
779 return
779 return
780
780
781 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
781 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
782 self._writedirstate(tr, st)
782 self._writedirstate(tr, st)
783
783
784 def addparentchangecallback(self, category, callback):
784 def addparentchangecallback(self, category, callback):
785 """add a callback to be called when the wd parents are changed
785 """add a callback to be called when the wd parents are changed
786
786
787 Callback will be called with the following arguments:
787 Callback will be called with the following arguments:
788 dirstate, (oldp1, oldp2), (newp1, newp2)
788 dirstate, (oldp1, oldp2), (newp1, newp2)
789
789
790 Category is a unique identifier to allow overwriting an old callback
790 Category is a unique identifier to allow overwriting an old callback
791 with a newer callback.
791 with a newer callback.
792 """
792 """
793 self._plchangecallbacks[category] = callback
793 self._plchangecallbacks[category] = callback
794
794
795 def _writedirstate(self, tr, st, now=None):
795 def _writedirstate(self, tr, st, now=None):
796 # notify callbacks about parents change
796 # notify callbacks about parents change
797 if self._origpl is not None and self._origpl != self._pl:
797 if self._origpl is not None and self._origpl != self._pl:
798 for c, callback in sorted(
798 for c, callback in sorted(
799 pycompat.iteritems(self._plchangecallbacks)
799 pycompat.iteritems(self._plchangecallbacks)
800 ):
800 ):
801 callback(self, self._origpl, self._pl)
801 callback(self, self._origpl, self._pl)
802 self._origpl = None
802 self._origpl = None
803
803
804 if now is None:
804 if now is None:
805 # use the modification time of the newly created temporary file as the
805 # use the modification time of the newly created temporary file as the
806 # filesystem's notion of 'now'
806 # filesystem's notion of 'now'
807 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
807 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
808
808
809 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
809 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
810 # timestamp of each entries in dirstate, because of 'now > mtime'
810 # timestamp of each entries in dirstate, because of 'now > mtime'
811 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
811 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
812 if delaywrite > 0:
812 if delaywrite > 0:
813 # do we have any files to delay for?
813 # do we have any files to delay for?
814 for f, e in pycompat.iteritems(self._map):
814 for f, e in pycompat.iteritems(self._map):
815 if e.need_delay(now):
815 if e.need_delay(now):
816 import time # to avoid useless import
816 import time # to avoid useless import
817
817
818 # rather than sleep n seconds, sleep until the next
818 # rather than sleep n seconds, sleep until the next
819 # multiple of n seconds
819 # multiple of n seconds
820 clock = time.time()
820 clock = time.time()
821 start = int(clock) - (int(clock) % delaywrite)
821 start = int(clock) - (int(clock) % delaywrite)
822 end = start + delaywrite
822 end = start + delaywrite
823 time.sleep(end - clock)
823 time.sleep(end - clock)
824 now = end # trust our estimate that the end is near now
824 now = end # trust our estimate that the end is near now
825 break
825 break
826
826
827 self._map.write(tr, st, now)
827 self._map.write(tr, st, now)
828 self._lastnormaltime = 0
828 self._lastnormaltime = 0
829 self._dirty = False
829 self._dirty = False
830
830
831 def _dirignore(self, f):
831 def _dirignore(self, f):
832 if self._ignore(f):
832 if self._ignore(f):
833 return True
833 return True
834 for p in pathutil.finddirs(f):
834 for p in pathutil.finddirs(f):
835 if self._ignore(p):
835 if self._ignore(p):
836 return True
836 return True
837 return False
837 return False
838
838
839 def _ignorefiles(self):
839 def _ignorefiles(self):
840 files = []
840 files = []
841 if os.path.exists(self._join(b'.hgignore')):
841 if os.path.exists(self._join(b'.hgignore')):
842 files.append(self._join(b'.hgignore'))
842 files.append(self._join(b'.hgignore'))
843 for name, path in self._ui.configitems(b"ui"):
843 for name, path in self._ui.configitems(b"ui"):
844 if name == b'ignore' or name.startswith(b'ignore.'):
844 if name == b'ignore' or name.startswith(b'ignore.'):
845 # we need to use os.path.join here rather than self._join
845 # we need to use os.path.join here rather than self._join
846 # because path is arbitrary and user-specified
846 # because path is arbitrary and user-specified
847 files.append(os.path.join(self._rootdir, util.expandpath(path)))
847 files.append(os.path.join(self._rootdir, util.expandpath(path)))
848 return files
848 return files
849
849
850 def _ignorefileandline(self, f):
850 def _ignorefileandline(self, f):
851 files = collections.deque(self._ignorefiles())
851 files = collections.deque(self._ignorefiles())
852 visited = set()
852 visited = set()
853 while files:
853 while files:
854 i = files.popleft()
854 i = files.popleft()
855 patterns = matchmod.readpatternfile(
855 patterns = matchmod.readpatternfile(
856 i, self._ui.warn, sourceinfo=True
856 i, self._ui.warn, sourceinfo=True
857 )
857 )
858 for pattern, lineno, line in patterns:
858 for pattern, lineno, line in patterns:
859 kind, p = matchmod._patsplit(pattern, b'glob')
859 kind, p = matchmod._patsplit(pattern, b'glob')
860 if kind == b"subinclude":
860 if kind == b"subinclude":
861 if p not in visited:
861 if p not in visited:
862 files.append(p)
862 files.append(p)
863 continue
863 continue
864 m = matchmod.match(
864 m = matchmod.match(
865 self._root, b'', [], [pattern], warn=self._ui.warn
865 self._root, b'', [], [pattern], warn=self._ui.warn
866 )
866 )
867 if m(f):
867 if m(f):
868 return (i, lineno, line)
868 return (i, lineno, line)
869 visited.add(i)
869 visited.add(i)
870 return (None, -1, b"")
870 return (None, -1, b"")
871
871
872 def _walkexplicit(self, match, subrepos):
872 def _walkexplicit(self, match, subrepos):
873 """Get stat data about the files explicitly specified by match.
873 """Get stat data about the files explicitly specified by match.
874
874
875 Return a triple (results, dirsfound, dirsnotfound).
875 Return a triple (results, dirsfound, dirsnotfound).
876 - results is a mapping from filename to stat result. It also contains
876 - results is a mapping from filename to stat result. It also contains
877 listings mapping subrepos and .hg to None.
877 listings mapping subrepos and .hg to None.
878 - dirsfound is a list of files found to be directories.
878 - dirsfound is a list of files found to be directories.
879 - dirsnotfound is a list of files that the dirstate thinks are
879 - dirsnotfound is a list of files that the dirstate thinks are
880 directories and that were not found."""
880 directories and that were not found."""
881
881
882 def badtype(mode):
882 def badtype(mode):
883 kind = _(b'unknown')
883 kind = _(b'unknown')
884 if stat.S_ISCHR(mode):
884 if stat.S_ISCHR(mode):
885 kind = _(b'character device')
885 kind = _(b'character device')
886 elif stat.S_ISBLK(mode):
886 elif stat.S_ISBLK(mode):
887 kind = _(b'block device')
887 kind = _(b'block device')
888 elif stat.S_ISFIFO(mode):
888 elif stat.S_ISFIFO(mode):
889 kind = _(b'fifo')
889 kind = _(b'fifo')
890 elif stat.S_ISSOCK(mode):
890 elif stat.S_ISSOCK(mode):
891 kind = _(b'socket')
891 kind = _(b'socket')
892 elif stat.S_ISDIR(mode):
892 elif stat.S_ISDIR(mode):
893 kind = _(b'directory')
893 kind = _(b'directory')
894 return _(b'unsupported file type (type is %s)') % kind
894 return _(b'unsupported file type (type is %s)') % kind
895
895
896 badfn = match.bad
896 badfn = match.bad
897 dmap = self._map
897 dmap = self._map
898 lstat = os.lstat
898 lstat = os.lstat
899 getkind = stat.S_IFMT
899 getkind = stat.S_IFMT
900 dirkind = stat.S_IFDIR
900 dirkind = stat.S_IFDIR
901 regkind = stat.S_IFREG
901 regkind = stat.S_IFREG
902 lnkkind = stat.S_IFLNK
902 lnkkind = stat.S_IFLNK
903 join = self._join
903 join = self._join
904 dirsfound = []
904 dirsfound = []
905 foundadd = dirsfound.append
905 foundadd = dirsfound.append
906 dirsnotfound = []
906 dirsnotfound = []
907 notfoundadd = dirsnotfound.append
907 notfoundadd = dirsnotfound.append
908
908
909 if not match.isexact() and self._checkcase:
909 if not match.isexact() and self._checkcase:
910 normalize = self._normalize
910 normalize = self._normalize
911 else:
911 else:
912 normalize = None
912 normalize = None
913
913
914 files = sorted(match.files())
914 files = sorted(match.files())
915 subrepos.sort()
915 subrepos.sort()
916 i, j = 0, 0
916 i, j = 0, 0
917 while i < len(files) and j < len(subrepos):
917 while i < len(files) and j < len(subrepos):
918 subpath = subrepos[j] + b"/"
918 subpath = subrepos[j] + b"/"
919 if files[i] < subpath:
919 if files[i] < subpath:
920 i += 1
920 i += 1
921 continue
921 continue
922 while i < len(files) and files[i].startswith(subpath):
922 while i < len(files) and files[i].startswith(subpath):
923 del files[i]
923 del files[i]
924 j += 1
924 j += 1
925
925
926 if not files or b'' in files:
926 if not files or b'' in files:
927 files = [b'']
927 files = [b'']
928 # constructing the foldmap is expensive, so don't do it for the
928 # constructing the foldmap is expensive, so don't do it for the
929 # common case where files is ['']
929 # common case where files is ['']
930 normalize = None
930 normalize = None
931 results = dict.fromkeys(subrepos)
931 results = dict.fromkeys(subrepos)
932 results[b'.hg'] = None
932 results[b'.hg'] = None
933
933
934 for ff in files:
934 for ff in files:
935 if normalize:
935 if normalize:
936 nf = normalize(ff, False, True)
936 nf = normalize(ff, False, True)
937 else:
937 else:
938 nf = ff
938 nf = ff
939 if nf in results:
939 if nf in results:
940 continue
940 continue
941
941
942 try:
942 try:
943 st = lstat(join(nf))
943 st = lstat(join(nf))
944 kind = getkind(st.st_mode)
944 kind = getkind(st.st_mode)
945 if kind == dirkind:
945 if kind == dirkind:
946 if nf in dmap:
946 if nf in dmap:
947 # file replaced by dir on disk but still in dirstate
947 # file replaced by dir on disk but still in dirstate
948 results[nf] = None
948 results[nf] = None
949 foundadd((nf, ff))
949 foundadd((nf, ff))
950 elif kind == regkind or kind == lnkkind:
950 elif kind == regkind or kind == lnkkind:
951 results[nf] = st
951 results[nf] = st
952 else:
952 else:
953 badfn(ff, badtype(kind))
953 badfn(ff, badtype(kind))
954 if nf in dmap:
954 if nf in dmap:
955 results[nf] = None
955 results[nf] = None
956 except OSError as inst: # nf not found on disk - it is dirstate only
956 except OSError as inst: # nf not found on disk - it is dirstate only
957 if nf in dmap: # does it exactly match a missing file?
957 if nf in dmap: # does it exactly match a missing file?
958 results[nf] = None
958 results[nf] = None
959 else: # does it match a missing directory?
959 else: # does it match a missing directory?
960 if self._map.hasdir(nf):
960 if self._map.hasdir(nf):
961 notfoundadd(nf)
961 notfoundadd(nf)
962 else:
962 else:
963 badfn(ff, encoding.strtolocal(inst.strerror))
963 badfn(ff, encoding.strtolocal(inst.strerror))
964
964
965 # match.files() may contain explicitly-specified paths that shouldn't
965 # match.files() may contain explicitly-specified paths that shouldn't
966 # be taken; drop them from the list of files found. dirsfound/notfound
966 # be taken; drop them from the list of files found. dirsfound/notfound
967 # aren't filtered here because they will be tested later.
967 # aren't filtered here because they will be tested later.
968 if match.anypats():
968 if match.anypats():
969 for f in list(results):
969 for f in list(results):
970 if f == b'.hg' or f in subrepos:
970 if f == b'.hg' or f in subrepos:
971 # keep sentinel to disable further out-of-repo walks
971 # keep sentinel to disable further out-of-repo walks
972 continue
972 continue
973 if not match(f):
973 if not match(f):
974 del results[f]
974 del results[f]
975
975
976 # Case insensitive filesystems cannot rely on lstat() failing to detect
976 # Case insensitive filesystems cannot rely on lstat() failing to detect
977 # a case-only rename. Prune the stat object for any file that does not
977 # a case-only rename. Prune the stat object for any file that does not
978 # match the case in the filesystem, if there are multiple files that
978 # match the case in the filesystem, if there are multiple files that
979 # normalize to the same path.
979 # normalize to the same path.
980 if match.isexact() and self._checkcase:
980 if match.isexact() and self._checkcase:
981 normed = {}
981 normed = {}
982
982
983 for f, st in pycompat.iteritems(results):
983 for f, st in pycompat.iteritems(results):
984 if st is None:
984 if st is None:
985 continue
985 continue
986
986
987 nc = util.normcase(f)
987 nc = util.normcase(f)
988 paths = normed.get(nc)
988 paths = normed.get(nc)
989
989
990 if paths is None:
990 if paths is None:
991 paths = set()
991 paths = set()
992 normed[nc] = paths
992 normed[nc] = paths
993
993
994 paths.add(f)
994 paths.add(f)
995
995
996 for norm, paths in pycompat.iteritems(normed):
996 for norm, paths in pycompat.iteritems(normed):
997 if len(paths) > 1:
997 if len(paths) > 1:
998 for path in paths:
998 for path in paths:
999 folded = self._discoverpath(
999 folded = self._discoverpath(
1000 path, norm, True, None, self._map.dirfoldmap
1000 path, norm, True, None, self._map.dirfoldmap
1001 )
1001 )
1002 if path != folded:
1002 if path != folded:
1003 results[path] = None
1003 results[path] = None
1004
1004
1005 return results, dirsfound, dirsnotfound
1005 return results, dirsfound, dirsnotfound
1006
1006
1007 def walk(self, match, subrepos, unknown, ignored, full=True):
1007 def walk(self, match, subrepos, unknown, ignored, full=True):
1008 """
1008 """
1009 Walk recursively through the directory tree, finding all files
1009 Walk recursively through the directory tree, finding all files
1010 matched by match.
1010 matched by match.
1011
1011
1012 If full is False, maybe skip some known-clean files.
1012 If full is False, maybe skip some known-clean files.
1013
1013
1014 Return a dict mapping filename to stat-like object (either
1014 Return a dict mapping filename to stat-like object (either
1015 mercurial.osutil.stat instance or return value of os.stat()).
1015 mercurial.osutil.stat instance or return value of os.stat()).
1016
1016
1017 """
1017 """
1018 # full is a flag that extensions that hook into walk can use -- this
1018 # full is a flag that extensions that hook into walk can use -- this
1019 # implementation doesn't use it at all. This satisfies the contract
1019 # implementation doesn't use it at all. This satisfies the contract
1020 # because we only guarantee a "maybe".
1020 # because we only guarantee a "maybe".
1021
1021
1022 if ignored:
1022 if ignored:
1023 ignore = util.never
1023 ignore = util.never
1024 dirignore = util.never
1024 dirignore = util.never
1025 elif unknown:
1025 elif unknown:
1026 ignore = self._ignore
1026 ignore = self._ignore
1027 dirignore = self._dirignore
1027 dirignore = self._dirignore
1028 else:
1028 else:
1029 # if not unknown and not ignored, drop dir recursion and step 2
1029 # if not unknown and not ignored, drop dir recursion and step 2
1030 ignore = util.always
1030 ignore = util.always
1031 dirignore = util.always
1031 dirignore = util.always
1032
1032
1033 matchfn = match.matchfn
1033 matchfn = match.matchfn
1034 matchalways = match.always()
1034 matchalways = match.always()
1035 matchtdir = match.traversedir
1035 matchtdir = match.traversedir
1036 dmap = self._map
1036 dmap = self._map
1037 listdir = util.listdir
1037 listdir = util.listdir
1038 lstat = os.lstat
1038 lstat = os.lstat
1039 dirkind = stat.S_IFDIR
1039 dirkind = stat.S_IFDIR
1040 regkind = stat.S_IFREG
1040 regkind = stat.S_IFREG
1041 lnkkind = stat.S_IFLNK
1041 lnkkind = stat.S_IFLNK
1042 join = self._join
1042 join = self._join
1043
1043
1044 exact = skipstep3 = False
1044 exact = skipstep3 = False
1045 if match.isexact(): # match.exact
1045 if match.isexact(): # match.exact
1046 exact = True
1046 exact = True
1047 dirignore = util.always # skip step 2
1047 dirignore = util.always # skip step 2
1048 elif match.prefix(): # match.match, no patterns
1048 elif match.prefix(): # match.match, no patterns
1049 skipstep3 = True
1049 skipstep3 = True
1050
1050
1051 if not exact and self._checkcase:
1051 if not exact and self._checkcase:
1052 normalize = self._normalize
1052 normalize = self._normalize
1053 normalizefile = self._normalizefile
1053 normalizefile = self._normalizefile
1054 skipstep3 = False
1054 skipstep3 = False
1055 else:
1055 else:
1056 normalize = self._normalize
1056 normalize = self._normalize
1057 normalizefile = None
1057 normalizefile = None
1058
1058
1059 # step 1: find all explicit files
1059 # step 1: find all explicit files
1060 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1060 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1061 if matchtdir:
1061 if matchtdir:
1062 for d in work:
1062 for d in work:
1063 matchtdir(d[0])
1063 matchtdir(d[0])
1064 for d in dirsnotfound:
1064 for d in dirsnotfound:
1065 matchtdir(d)
1065 matchtdir(d)
1066
1066
1067 skipstep3 = skipstep3 and not (work or dirsnotfound)
1067 skipstep3 = skipstep3 and not (work or dirsnotfound)
1068 work = [d for d in work if not dirignore(d[0])]
1068 work = [d for d in work if not dirignore(d[0])]
1069
1069
1070 # step 2: visit subdirectories
1070 # step 2: visit subdirectories
1071 def traverse(work, alreadynormed):
1071 def traverse(work, alreadynormed):
1072 wadd = work.append
1072 wadd = work.append
1073 while work:
1073 while work:
1074 tracing.counter('dirstate.walk work', len(work))
1074 tracing.counter('dirstate.walk work', len(work))
1075 nd = work.pop()
1075 nd = work.pop()
1076 visitentries = match.visitchildrenset(nd)
1076 visitentries = match.visitchildrenset(nd)
1077 if not visitentries:
1077 if not visitentries:
1078 continue
1078 continue
1079 if visitentries == b'this' or visitentries == b'all':
1079 if visitentries == b'this' or visitentries == b'all':
1080 visitentries = None
1080 visitentries = None
1081 skip = None
1081 skip = None
1082 if nd != b'':
1082 if nd != b'':
1083 skip = b'.hg'
1083 skip = b'.hg'
1084 try:
1084 try:
1085 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1085 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1086 entries = listdir(join(nd), stat=True, skip=skip)
1086 entries = listdir(join(nd), stat=True, skip=skip)
1087 except OSError as inst:
1087 except OSError as inst:
1088 if inst.errno in (errno.EACCES, errno.ENOENT):
1088 if inst.errno in (errno.EACCES, errno.ENOENT):
1089 match.bad(
1089 match.bad(
1090 self.pathto(nd), encoding.strtolocal(inst.strerror)
1090 self.pathto(nd), encoding.strtolocal(inst.strerror)
1091 )
1091 )
1092 continue
1092 continue
1093 raise
1093 raise
1094 for f, kind, st in entries:
1094 for f, kind, st in entries:
1095 # Some matchers may return files in the visitentries set,
1095 # Some matchers may return files in the visitentries set,
1096 # instead of 'this', if the matcher explicitly mentions them
1096 # instead of 'this', if the matcher explicitly mentions them
1097 # and is not an exactmatcher. This is acceptable; we do not
1097 # and is not an exactmatcher. This is acceptable; we do not
1098 # make any hard assumptions about file-or-directory below
1098 # make any hard assumptions about file-or-directory below
1099 # based on the presence of `f` in visitentries. If
1099 # based on the presence of `f` in visitentries. If
1100 # visitchildrenset returned a set, we can always skip the
1100 # visitchildrenset returned a set, we can always skip the
1101 # entries *not* in the set it provided regardless of whether
1101 # entries *not* in the set it provided regardless of whether
1102 # they're actually a file or a directory.
1102 # they're actually a file or a directory.
1103 if visitentries and f not in visitentries:
1103 if visitentries and f not in visitentries:
1104 continue
1104 continue
1105 if normalizefile:
1105 if normalizefile:
1106 # even though f might be a directory, we're only
1106 # even though f might be a directory, we're only
1107 # interested in comparing it to files currently in the
1107 # interested in comparing it to files currently in the
1108 # dmap -- therefore normalizefile is enough
1108 # dmap -- therefore normalizefile is enough
1109 nf = normalizefile(
1109 nf = normalizefile(
1110 nd and (nd + b"/" + f) or f, True, True
1110 nd and (nd + b"/" + f) or f, True, True
1111 )
1111 )
1112 else:
1112 else:
1113 nf = nd and (nd + b"/" + f) or f
1113 nf = nd and (nd + b"/" + f) or f
1114 if nf not in results:
1114 if nf not in results:
1115 if kind == dirkind:
1115 if kind == dirkind:
1116 if not ignore(nf):
1116 if not ignore(nf):
1117 if matchtdir:
1117 if matchtdir:
1118 matchtdir(nf)
1118 matchtdir(nf)
1119 wadd(nf)
1119 wadd(nf)
1120 if nf in dmap and (matchalways or matchfn(nf)):
1120 if nf in dmap and (matchalways or matchfn(nf)):
1121 results[nf] = None
1121 results[nf] = None
1122 elif kind == regkind or kind == lnkkind:
1122 elif kind == regkind or kind == lnkkind:
1123 if nf in dmap:
1123 if nf in dmap:
1124 if matchalways or matchfn(nf):
1124 if matchalways or matchfn(nf):
1125 results[nf] = st
1125 results[nf] = st
1126 elif (matchalways or matchfn(nf)) and not ignore(
1126 elif (matchalways or matchfn(nf)) and not ignore(
1127 nf
1127 nf
1128 ):
1128 ):
1129 # unknown file -- normalize if necessary
1129 # unknown file -- normalize if necessary
1130 if not alreadynormed:
1130 if not alreadynormed:
1131 nf = normalize(nf, False, True)
1131 nf = normalize(nf, False, True)
1132 results[nf] = st
1132 results[nf] = st
1133 elif nf in dmap and (matchalways or matchfn(nf)):
1133 elif nf in dmap and (matchalways or matchfn(nf)):
1134 results[nf] = None
1134 results[nf] = None
1135
1135
1136 for nd, d in work:
1136 for nd, d in work:
1137 # alreadynormed means that processwork doesn't have to do any
1137 # alreadynormed means that processwork doesn't have to do any
1138 # expensive directory normalization
1138 # expensive directory normalization
1139 alreadynormed = not normalize or nd == d
1139 alreadynormed = not normalize or nd == d
1140 traverse([d], alreadynormed)
1140 traverse([d], alreadynormed)
1141
1141
1142 for s in subrepos:
1142 for s in subrepos:
1143 del results[s]
1143 del results[s]
1144 del results[b'.hg']
1144 del results[b'.hg']
1145
1145
1146 # step 3: visit remaining files from dmap
1146 # step 3: visit remaining files from dmap
1147 if not skipstep3 and not exact:
1147 if not skipstep3 and not exact:
1148 # If a dmap file is not in results yet, it was either
1148 # If a dmap file is not in results yet, it was either
1149 # a) not matching matchfn b) ignored, c) missing, or d) under a
1149 # a) not matching matchfn b) ignored, c) missing, or d) under a
1150 # symlink directory.
1150 # symlink directory.
1151 if not results and matchalways:
1151 if not results and matchalways:
1152 visit = [f for f in dmap]
1152 visit = [f for f in dmap]
1153 else:
1153 else:
1154 visit = [f for f in dmap if f not in results and matchfn(f)]
1154 visit = [f for f in dmap if f not in results and matchfn(f)]
1155 visit.sort()
1155 visit.sort()
1156
1156
1157 if unknown:
1157 if unknown:
1158 # unknown == True means we walked all dirs under the roots
1158 # unknown == True means we walked all dirs under the roots
1159 # that wasn't ignored, and everything that matched was stat'ed
1159 # that wasn't ignored, and everything that matched was stat'ed
1160 # and is already in results.
1160 # and is already in results.
1161 # The rest must thus be ignored or under a symlink.
1161 # The rest must thus be ignored or under a symlink.
1162 audit_path = pathutil.pathauditor(self._root, cached=True)
1162 audit_path = pathutil.pathauditor(self._root, cached=True)
1163
1163
1164 for nf in iter(visit):
1164 for nf in iter(visit):
1165 # If a stat for the same file was already added with a
1165 # If a stat for the same file was already added with a
1166 # different case, don't add one for this, since that would
1166 # different case, don't add one for this, since that would
1167 # make it appear as if the file exists under both names
1167 # make it appear as if the file exists under both names
1168 # on disk.
1168 # on disk.
1169 if (
1169 if (
1170 normalizefile
1170 normalizefile
1171 and normalizefile(nf, True, True) in results
1171 and normalizefile(nf, True, True) in results
1172 ):
1172 ):
1173 results[nf] = None
1173 results[nf] = None
1174 # Report ignored items in the dmap as long as they are not
1174 # Report ignored items in the dmap as long as they are not
1175 # under a symlink directory.
1175 # under a symlink directory.
1176 elif audit_path.check(nf):
1176 elif audit_path.check(nf):
1177 try:
1177 try:
1178 results[nf] = lstat(join(nf))
1178 results[nf] = lstat(join(nf))
1179 # file was just ignored, no links, and exists
1179 # file was just ignored, no links, and exists
1180 except OSError:
1180 except OSError:
1181 # file doesn't exist
1181 # file doesn't exist
1182 results[nf] = None
1182 results[nf] = None
1183 else:
1183 else:
1184 # It's either missing or under a symlink directory
1184 # It's either missing or under a symlink directory
1185 # which we in this case report as missing
1185 # which we in this case report as missing
1186 results[nf] = None
1186 results[nf] = None
1187 else:
1187 else:
1188 # We may not have walked the full directory tree above,
1188 # We may not have walked the full directory tree above,
1189 # so stat and check everything we missed.
1189 # so stat and check everything we missed.
1190 iv = iter(visit)
1190 iv = iter(visit)
1191 for st in util.statfiles([join(i) for i in visit]):
1191 for st in util.statfiles([join(i) for i in visit]):
1192 results[next(iv)] = st
1192 results[next(iv)] = st
1193 return results
1193 return results
1194
1194
1195 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1195 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1196 # Force Rayon (Rust parallelism library) to respect the number of
1196 # Force Rayon (Rust parallelism library) to respect the number of
1197 # workers. This is a temporary workaround until Rust code knows
1197 # workers. This is a temporary workaround until Rust code knows
1198 # how to read the config file.
1198 # how to read the config file.
1199 numcpus = self._ui.configint(b"worker", b"numcpus")
1199 numcpus = self._ui.configint(b"worker", b"numcpus")
1200 if numcpus is not None:
1200 if numcpus is not None:
1201 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1201 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1202
1202
1203 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1203 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1204 if not workers_enabled:
1204 if not workers_enabled:
1205 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1205 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1206
1206
1207 (
1207 (
1208 lookup,
1208 lookup,
1209 modified,
1209 modified,
1210 added,
1210 added,
1211 removed,
1211 removed,
1212 deleted,
1212 deleted,
1213 clean,
1213 clean,
1214 ignored,
1214 ignored,
1215 unknown,
1215 unknown,
1216 warnings,
1216 warnings,
1217 bad,
1217 bad,
1218 traversed,
1218 traversed,
1219 dirty,
1219 dirty,
1220 ) = rustmod.status(
1220 ) = rustmod.status(
1221 self._map._map,
1221 self._map._map,
1222 matcher,
1222 matcher,
1223 self._rootdir,
1223 self._rootdir,
1224 self._ignorefiles(),
1224 self._ignorefiles(),
1225 self._checkexec,
1225 self._checkexec,
1226 self._lastnormaltime,
1226 self._lastnormaltime,
1227 bool(list_clean),
1227 bool(list_clean),
1228 bool(list_ignored),
1228 bool(list_ignored),
1229 bool(list_unknown),
1229 bool(list_unknown),
1230 bool(matcher.traversedir),
1230 bool(matcher.traversedir),
1231 )
1231 )
1232
1232
1233 self._dirty |= dirty
1233 self._dirty |= dirty
1234
1234
1235 if matcher.traversedir:
1235 if matcher.traversedir:
1236 for dir in traversed:
1236 for dir in traversed:
1237 matcher.traversedir(dir)
1237 matcher.traversedir(dir)
1238
1238
1239 if self._ui.warn:
1239 if self._ui.warn:
1240 for item in warnings:
1240 for item in warnings:
1241 if isinstance(item, tuple):
1241 if isinstance(item, tuple):
1242 file_path, syntax = item
1242 file_path, syntax = item
1243 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1243 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1244 file_path,
1244 file_path,
1245 syntax,
1245 syntax,
1246 )
1246 )
1247 self._ui.warn(msg)
1247 self._ui.warn(msg)
1248 else:
1248 else:
1249 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1249 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1250 self._ui.warn(
1250 self._ui.warn(
1251 msg
1251 msg
1252 % (
1252 % (
1253 pathutil.canonpath(
1253 pathutil.canonpath(
1254 self._rootdir, self._rootdir, item
1254 self._rootdir, self._rootdir, item
1255 ),
1255 ),
1256 b"No such file or directory",
1256 b"No such file or directory",
1257 )
1257 )
1258 )
1258 )
1259
1259
1260 for (fn, message) in bad:
1260 for (fn, message) in bad:
1261 matcher.bad(fn, encoding.strtolocal(message))
1261 matcher.bad(fn, encoding.strtolocal(message))
1262
1262
1263 status = scmutil.status(
1263 status = scmutil.status(
1264 modified=modified,
1264 modified=modified,
1265 added=added,
1265 added=added,
1266 removed=removed,
1266 removed=removed,
1267 deleted=deleted,
1267 deleted=deleted,
1268 unknown=unknown,
1268 unknown=unknown,
1269 ignored=ignored,
1269 ignored=ignored,
1270 clean=clean,
1270 clean=clean,
1271 )
1271 )
1272 return (lookup, status)
1272 return (lookup, status)
1273
1273
1274 def status(self, match, subrepos, ignored, clean, unknown):
1274 def status(self, match, subrepos, ignored, clean, unknown):
1275 """Determine the status of the working copy relative to the
1275 """Determine the status of the working copy relative to the
1276 dirstate and return a pair of (unsure, status), where status is of type
1276 dirstate and return a pair of (unsure, status), where status is of type
1277 scmutil.status and:
1277 scmutil.status and:
1278
1278
1279 unsure:
1279 unsure:
1280 files that might have been modified since the dirstate was
1280 files that might have been modified since the dirstate was
1281 written, but need to be read to be sure (size is the same
1281 written, but need to be read to be sure (size is the same
1282 but mtime differs)
1282 but mtime differs)
1283 status.modified:
1283 status.modified:
1284 files that have definitely been modified since the dirstate
1284 files that have definitely been modified since the dirstate
1285 was written (different size or mode)
1285 was written (different size or mode)
1286 status.clean:
1286 status.clean:
1287 files that have definitely not been modified since the
1287 files that have definitely not been modified since the
1288 dirstate was written
1288 dirstate was written
1289 """
1289 """
1290 listignored, listclean, listunknown = ignored, clean, unknown
1290 listignored, listclean, listunknown = ignored, clean, unknown
1291 lookup, modified, added, unknown, ignored = [], [], [], [], []
1291 lookup, modified, added, unknown, ignored = [], [], [], [], []
1292 removed, deleted, clean = [], [], []
1292 removed, deleted, clean = [], [], []
1293
1293
1294 dmap = self._map
1294 dmap = self._map
1295 dmap.preload()
1295 dmap.preload()
1296
1296
1297 use_rust = True
1297 use_rust = True
1298
1298
1299 allowed_matchers = (
1299 allowed_matchers = (
1300 matchmod.alwaysmatcher,
1300 matchmod.alwaysmatcher,
1301 matchmod.exactmatcher,
1301 matchmod.exactmatcher,
1302 matchmod.includematcher,
1302 matchmod.includematcher,
1303 )
1303 )
1304
1304
1305 if rustmod is None:
1305 if rustmod is None:
1306 use_rust = False
1306 use_rust = False
1307 elif self._checkcase:
1307 elif self._checkcase:
1308 # Case-insensitive filesystems are not handled yet
1308 # Case-insensitive filesystems are not handled yet
1309 use_rust = False
1309 use_rust = False
1310 elif subrepos:
1310 elif subrepos:
1311 use_rust = False
1311 use_rust = False
1312 elif sparse.enabled:
1312 elif sparse.enabled:
1313 use_rust = False
1313 use_rust = False
1314 elif not isinstance(match, allowed_matchers):
1314 elif not isinstance(match, allowed_matchers):
1315 # Some matchers have yet to be implemented
1315 # Some matchers have yet to be implemented
1316 use_rust = False
1316 use_rust = False
1317
1317
1318 if use_rust:
1318 if use_rust:
1319 try:
1319 try:
1320 return self._rust_status(
1320 return self._rust_status(
1321 match, listclean, listignored, listunknown
1321 match, listclean, listignored, listunknown
1322 )
1322 )
1323 except rustmod.FallbackError:
1323 except rustmod.FallbackError:
1324 pass
1324 pass
1325
1325
1326 def noop(f):
1326 def noop(f):
1327 pass
1327 pass
1328
1328
1329 dcontains = dmap.__contains__
1329 dcontains = dmap.__contains__
1330 dget = dmap.__getitem__
1330 dget = dmap.__getitem__
1331 ladd = lookup.append # aka "unsure"
1331 ladd = lookup.append # aka "unsure"
1332 madd = modified.append
1332 madd = modified.append
1333 aadd = added.append
1333 aadd = added.append
1334 uadd = unknown.append if listunknown else noop
1334 uadd = unknown.append if listunknown else noop
1335 iadd = ignored.append if listignored else noop
1335 iadd = ignored.append if listignored else noop
1336 radd = removed.append
1336 radd = removed.append
1337 dadd = deleted.append
1337 dadd = deleted.append
1338 cadd = clean.append if listclean else noop
1338 cadd = clean.append if listclean else noop
1339 mexact = match.exact
1339 mexact = match.exact
1340 dirignore = self._dirignore
1340 dirignore = self._dirignore
1341 checkexec = self._checkexec
1341 checkexec = self._checkexec
1342 copymap = self._map.copymap
1342 copymap = self._map.copymap
1343 lastnormaltime = self._lastnormaltime
1343 lastnormaltime = self._lastnormaltime
1344
1344
1345 # We need to do full walks when either
1345 # We need to do full walks when either
1346 # - we're listing all clean files, or
1346 # - we're listing all clean files, or
1347 # - match.traversedir does something, because match.traversedir should
1347 # - match.traversedir does something, because match.traversedir should
1348 # be called for every dir in the working dir
1348 # be called for every dir in the working dir
1349 full = listclean or match.traversedir is not None
1349 full = listclean or match.traversedir is not None
1350 for fn, st in pycompat.iteritems(
1350 for fn, st in pycompat.iteritems(
1351 self.walk(match, subrepos, listunknown, listignored, full=full)
1351 self.walk(match, subrepos, listunknown, listignored, full=full)
1352 ):
1352 ):
1353 if not dcontains(fn):
1353 if not dcontains(fn):
1354 if (listignored or mexact(fn)) and dirignore(fn):
1354 if (listignored or mexact(fn)) and dirignore(fn):
1355 if listignored:
1355 if listignored:
1356 iadd(fn)
1356 iadd(fn)
1357 else:
1357 else:
1358 uadd(fn)
1358 uadd(fn)
1359 continue
1359 continue
1360
1360
1361 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1361 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1362 # written like that for performance reasons. dmap[fn] is not a
1362 # written like that for performance reasons. dmap[fn] is not a
1363 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1363 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1364 # opcode has fast paths when the value to be unpacked is a tuple or
1364 # opcode has fast paths when the value to be unpacked is a tuple or
1365 # a list, but falls back to creating a full-fledged iterator in
1365 # a list, but falls back to creating a full-fledged iterator in
1366 # general. That is much slower than simply accessing and storing the
1366 # general. That is much slower than simply accessing and storing the
1367 # tuple members one by one.
1367 # tuple members one by one.
1368 t = dget(fn)
1368 t = dget(fn)
1369 mode = t.mode
1369 mode = t.mode
1370 size = t.size
1370 size = t.size
1371 time = t.mtime
1371 time = t.mtime
1372
1372
1373 if not st and t.tracked:
1373 if not st and t.tracked:
1374 dadd(fn)
1374 dadd(fn)
1375 elif t.merged:
1375 elif t.merged or t.from_p2:
1376 madd(fn)
1376 madd(fn)
1377 elif t.added:
1377 elif t.added:
1378 aadd(fn)
1378 aadd(fn)
1379 elif t.removed:
1379 elif t.removed:
1380 radd(fn)
1380 radd(fn)
1381 elif t.tracked:
1381 elif t.tracked:
1382 if (
1382 if (
1383 size >= 0
1383 size >= 0
1384 and (
1384 and (
1385 (size != st.st_size and size != st.st_size & _rangemask)
1385 (size != st.st_size and size != st.st_size & _rangemask)
1386 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1386 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1387 )
1387 )
1388 or t.from_p2
1389 or fn in copymap
1388 or fn in copymap
1390 ):
1389 ):
1391 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1390 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1392 # issue6456: Size returned may be longer due to
1391 # issue6456: Size returned may be longer due to
1393 # encryption on EXT-4 fscrypt, undecided.
1392 # encryption on EXT-4 fscrypt, undecided.
1394 ladd(fn)
1393 ladd(fn)
1395 else:
1394 else:
1396 madd(fn)
1395 madd(fn)
1397 elif (
1396 elif (
1398 time != st[stat.ST_MTIME]
1397 time != st[stat.ST_MTIME]
1399 and time != st[stat.ST_MTIME] & _rangemask
1398 and time != st[stat.ST_MTIME] & _rangemask
1400 ):
1399 ):
1401 ladd(fn)
1400 ladd(fn)
1402 elif st[stat.ST_MTIME] == lastnormaltime:
1401 elif st[stat.ST_MTIME] == lastnormaltime:
1403 # fn may have just been marked as normal and it may have
1402 # fn may have just been marked as normal and it may have
1404 # changed in the same second without changing its size.
1403 # changed in the same second without changing its size.
1405 # This can happen if we quickly do multiple commits.
1404 # This can happen if we quickly do multiple commits.
1406 # Force lookup, so we don't miss such a racy file change.
1405 # Force lookup, so we don't miss such a racy file change.
1407 ladd(fn)
1406 ladd(fn)
1408 elif listclean:
1407 elif listclean:
1409 cadd(fn)
1408 cadd(fn)
1410 status = scmutil.status(
1409 status = scmutil.status(
1411 modified, added, removed, deleted, unknown, ignored, clean
1410 modified, added, removed, deleted, unknown, ignored, clean
1412 )
1411 )
1413 return (lookup, status)
1412 return (lookup, status)
1414
1413
1415 def matches(self, match):
1414 def matches(self, match):
1416 """
1415 """
1417 return files in the dirstate (in whatever state) filtered by match
1416 return files in the dirstate (in whatever state) filtered by match
1418 """
1417 """
1419 dmap = self._map
1418 dmap = self._map
1420 if rustmod is not None:
1419 if rustmod is not None:
1421 dmap = self._map._map
1420 dmap = self._map._map
1422
1421
1423 if match.always():
1422 if match.always():
1424 return dmap.keys()
1423 return dmap.keys()
1425 files = match.files()
1424 files = match.files()
1426 if match.isexact():
1425 if match.isexact():
1427 # fast path -- filter the other way around, since typically files is
1426 # fast path -- filter the other way around, since typically files is
1428 # much smaller than dmap
1427 # much smaller than dmap
1429 return [f for f in files if f in dmap]
1428 return [f for f in files if f in dmap]
1430 if match.prefix() and all(fn in dmap for fn in files):
1429 if match.prefix() and all(fn in dmap for fn in files):
1431 # fast path -- all the values are known to be files, so just return
1430 # fast path -- all the values are known to be files, so just return
1432 # that
1431 # that
1433 return list(files)
1432 return list(files)
1434 return [f for f in dmap if match(f)]
1433 return [f for f in dmap if match(f)]
1435
1434
1436 def _actualfilename(self, tr):
1435 def _actualfilename(self, tr):
1437 if tr:
1436 if tr:
1438 return self._pendingfilename
1437 return self._pendingfilename
1439 else:
1438 else:
1440 return self._filename
1439 return self._filename
1441
1440
1442 def savebackup(self, tr, backupname):
1441 def savebackup(self, tr, backupname):
1443 '''Save current dirstate into backup file'''
1442 '''Save current dirstate into backup file'''
1444 filename = self._actualfilename(tr)
1443 filename = self._actualfilename(tr)
1445 assert backupname != filename
1444 assert backupname != filename
1446
1445
1447 # use '_writedirstate' instead of 'write' to write changes certainly,
1446 # use '_writedirstate' instead of 'write' to write changes certainly,
1448 # because the latter omits writing out if transaction is running.
1447 # because the latter omits writing out if transaction is running.
1449 # output file will be used to create backup of dirstate at this point.
1448 # output file will be used to create backup of dirstate at this point.
1450 if self._dirty or not self._opener.exists(filename):
1449 if self._dirty or not self._opener.exists(filename):
1451 self._writedirstate(
1450 self._writedirstate(
1452 tr,
1451 tr,
1453 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1452 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1454 )
1453 )
1455
1454
1456 if tr:
1455 if tr:
1457 # ensure that subsequent tr.writepending returns True for
1456 # ensure that subsequent tr.writepending returns True for
1458 # changes written out above, even if dirstate is never
1457 # changes written out above, even if dirstate is never
1459 # changed after this
1458 # changed after this
1460 tr.addfilegenerator(
1459 tr.addfilegenerator(
1461 b'dirstate',
1460 b'dirstate',
1462 (self._filename,),
1461 (self._filename,),
1463 lambda f: self._writedirstate(tr, f),
1462 lambda f: self._writedirstate(tr, f),
1464 location=b'plain',
1463 location=b'plain',
1465 )
1464 )
1466
1465
1467 # ensure that pending file written above is unlinked at
1466 # ensure that pending file written above is unlinked at
1468 # failure, even if tr.writepending isn't invoked until the
1467 # failure, even if tr.writepending isn't invoked until the
1469 # end of this transaction
1468 # end of this transaction
1470 tr.registertmp(filename, location=b'plain')
1469 tr.registertmp(filename, location=b'plain')
1471
1470
1472 self._opener.tryunlink(backupname)
1471 self._opener.tryunlink(backupname)
1473 # hardlink backup is okay because _writedirstate is always called
1472 # hardlink backup is okay because _writedirstate is always called
1474 # with an "atomictemp=True" file.
1473 # with an "atomictemp=True" file.
1475 util.copyfile(
1474 util.copyfile(
1476 self._opener.join(filename),
1475 self._opener.join(filename),
1477 self._opener.join(backupname),
1476 self._opener.join(backupname),
1478 hardlink=True,
1477 hardlink=True,
1479 )
1478 )
1480
1479
1481 def restorebackup(self, tr, backupname):
1480 def restorebackup(self, tr, backupname):
1482 '''Restore dirstate by backup file'''
1481 '''Restore dirstate by backup file'''
1483 # this "invalidate()" prevents "wlock.release()" from writing
1482 # this "invalidate()" prevents "wlock.release()" from writing
1484 # changes of dirstate out after restoring from backup file
1483 # changes of dirstate out after restoring from backup file
1485 self.invalidate()
1484 self.invalidate()
1486 filename = self._actualfilename(tr)
1485 filename = self._actualfilename(tr)
1487 o = self._opener
1486 o = self._opener
1488 if util.samefile(o.join(backupname), o.join(filename)):
1487 if util.samefile(o.join(backupname), o.join(filename)):
1489 o.unlink(backupname)
1488 o.unlink(backupname)
1490 else:
1489 else:
1491 o.rename(backupname, filename, checkambig=True)
1490 o.rename(backupname, filename, checkambig=True)
1492
1491
1493 def clearbackup(self, tr, backupname):
1492 def clearbackup(self, tr, backupname):
1494 '''Clear backup file'''
1493 '''Clear backup file'''
1495 self._opener.unlink(backupname)
1494 self._opener.unlink(backupname)
1496
1495
1497 def verify(self, m1, m2):
1496 def verify(self, m1, m2):
1498 """check the dirstate content again the parent manifest and yield errors"""
1497 """check the dirstate content again the parent manifest and yield errors"""
1499 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1498 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1500 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1499 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1501 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1500 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1502 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1501 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1503 for f, entry in self.items():
1502 for f, entry in self.items():
1504 state = entry.state
1503 state = entry.state
1505 if state in b"nr" and f not in m1:
1504 if state in b"nr" and f not in m1:
1506 yield (missing_from_p1, f, state)
1505 yield (missing_from_p1, f, state)
1507 if state in b"a" and f in m1:
1506 if state in b"a" and f in m1:
1508 yield (unexpected_in_p1, f, state)
1507 yield (unexpected_in_p1, f, state)
1509 if state in b"m" and f not in m1 and f not in m2:
1508 if state in b"m" and f not in m1 and f not in m2:
1510 yield (missing_from_ps, f, state)
1509 yield (missing_from_ps, f, state)
1511 for f in m1:
1510 for f in m1:
1512 state = self.get_entry(f).state
1511 state = self.get_entry(f).state
1513 if state not in b"nrm":
1512 if state not in b"nrm":
1514 yield (missing_from_ds, f, state)
1513 yield (missing_from_ds, f, state)
General Comments 0
You need to be logged in to leave comments. Login now