##// END OF EJS Templates
dirstate: require being in a `changing_parents` context to call `setparents`...
marmoute -
r51001:cdbd5f99 default
parent child Browse files
Show More
@@ -1,1660 +1,1658 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import collections
9 import collections
10 import contextlib
10 import contextlib
11 import os
11 import os
12 import stat
12 import stat
13 import uuid
13 import uuid
14
14
15 from .i18n import _
15 from .i18n import _
16 from .pycompat import delattr
16 from .pycompat import delattr
17
17
18 from hgdemandimport import tracing
18 from hgdemandimport import tracing
19
19
20 from . import (
20 from . import (
21 dirstatemap,
21 dirstatemap,
22 encoding,
22 encoding,
23 error,
23 error,
24 match as matchmod,
24 match as matchmod,
25 node,
25 node,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 util,
30 util,
31 )
31 )
32
32
33 from .dirstateutils import (
33 from .dirstateutils import (
34 timestamp,
34 timestamp,
35 )
35 )
36
36
37 from .interfaces import (
37 from .interfaces import (
38 dirstate as intdirstate,
38 dirstate as intdirstate,
39 util as interfaceutil,
39 util as interfaceutil,
40 )
40 )
41
41
42 parsers = policy.importmod('parsers')
42 parsers = policy.importmod('parsers')
43 rustmod = policy.importrust('dirstate')
43 rustmod = policy.importrust('dirstate')
44
44
45 HAS_FAST_DIRSTATE_V2 = rustmod is not None
45 HAS_FAST_DIRSTATE_V2 = rustmod is not None
46
46
47 propertycache = util.propertycache
47 propertycache = util.propertycache
48 filecache = scmutil.filecache
48 filecache = scmutil.filecache
49 _rangemask = dirstatemap.rangemask
49 _rangemask = dirstatemap.rangemask
50
50
51 DirstateItem = dirstatemap.DirstateItem
51 DirstateItem = dirstatemap.DirstateItem
52
52
53
53
class repocache(filecache):
    """filecache subclass for files that live under `.hg/`"""

    def join(self, obj, fname):
        # resolve `fname` through the repository's `.hg` opener
        return obj._opener.join(fname)
59
59
60
60
class rootcache(filecache):
    """filecache subclass for files that live in the repository root"""

    def join(self, obj, fname):
        # resolve `fname` relative to the working-directory root
        return obj._join(fname)
66
66
67
67
def requires_changing_parents(func):
    """Decorator: only allow `func` inside an active, non-invalidated
    `changing_parents` context.

    Raises error.ProgrammingError when called outside such a context or
    after the dirstate has been invalidated.
    """

    def wrap(self, *args, **kwargs):
        if not self.is_changing_parents:
            msg = 'calling `%s` outside of a changing_parents context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        if self._invalidated_context:
            msg = 'calling `%s` after the dirstate was invalidated'
            # bugfix: the function name was never interpolated here, so the
            # raised message contained a literal `%s` placeholder
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
80
80
81
81
def requires_changing_files(func):
    """Decorator: only allow `func` inside an active `changing_files`
    context; otherwise raise error.ProgrammingError."""

    def wrap(self, *args, **kwargs):
        if self.is_changing_files:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a `changing_files`'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return wrap
91
91
92
92
def requires_not_changing_parents(func):
    """Decorator: forbid calling `func` from within a `changing_parents`
    context; raises error.ProgrammingError when violated."""

    def wrap(self, *args, **kwargs):
        if not self.is_changing_parents:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a changing_parents context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return wrap
102
102
103
103
104 CHANGE_TYPE_PARENTS = "parents"
104 CHANGE_TYPE_PARENTS = "parents"
105 CHANGE_TYPE_FILES = "files"
105 CHANGE_TYPE_FILES = "files"
106
106
107
107
108 @interfaceutil.implementer(intdirstate.idirstate)
108 @interfaceutil.implementer(intdirstate.idirstate)
109 class dirstate:
109 class dirstate:
110 def __init__(
110 def __init__(
111 self,
111 self,
112 opener,
112 opener,
113 ui,
113 ui,
114 root,
114 root,
115 validate,
115 validate,
116 sparsematchfn,
116 sparsematchfn,
117 nodeconstants,
117 nodeconstants,
118 use_dirstate_v2,
118 use_dirstate_v2,
119 use_tracked_hint=False,
119 use_tracked_hint=False,
120 ):
120 ):
121 """Create a new dirstate object.
121 """Create a new dirstate object.
122
122
123 opener is an open()-like callable that can be used to open the
123 opener is an open()-like callable that can be used to open the
124 dirstate file; root is the root of the directory tracked by
124 dirstate file; root is the root of the directory tracked by
125 the dirstate.
125 the dirstate.
126 """
126 """
127 self._use_dirstate_v2 = use_dirstate_v2
127 self._use_dirstate_v2 = use_dirstate_v2
128 self._use_tracked_hint = use_tracked_hint
128 self._use_tracked_hint = use_tracked_hint
129 self._nodeconstants = nodeconstants
129 self._nodeconstants = nodeconstants
130 self._opener = opener
130 self._opener = opener
131 self._validate = validate
131 self._validate = validate
132 self._root = root
132 self._root = root
133 # Either build a sparse-matcher or None if sparse is disabled
133 # Either build a sparse-matcher or None if sparse is disabled
134 self._sparsematchfn = sparsematchfn
134 self._sparsematchfn = sparsematchfn
135 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
135 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
136 # UNC path pointing to root share (issue4557)
136 # UNC path pointing to root share (issue4557)
137 self._rootdir = pathutil.normasprefix(root)
137 self._rootdir = pathutil.normasprefix(root)
138 # True is any internal state may be different
138 # True is any internal state may be different
139 self._dirty = False
139 self._dirty = False
140 # True if the set of tracked file may be different
140 # True if the set of tracked file may be different
141 self._dirty_tracked_set = False
141 self._dirty_tracked_set = False
142 self._ui = ui
142 self._ui = ui
143 self._filecache = {}
143 self._filecache = {}
144 # nesting level of `changing_parents` context
144 # nesting level of `changing_parents` context
145 self._changing_level = 0
145 self._changing_level = 0
146 # the change currently underway
146 # the change currently underway
147 self._change_type = None
147 self._change_type = None
148 # True if the current dirstate changing operations have been
148 # True if the current dirstate changing operations have been
149 # invalidated (used to make sure all nested contexts have been exited)
149 # invalidated (used to make sure all nested contexts have been exited)
150 self._invalidated_context = False
150 self._invalidated_context = False
151 self._filename = b'dirstate'
151 self._filename = b'dirstate'
152 self._filename_th = b'dirstate-tracked-hint'
152 self._filename_th = b'dirstate-tracked-hint'
153 self._pendingfilename = b'%s.pending' % self._filename
153 self._pendingfilename = b'%s.pending' % self._filename
154 self._plchangecallbacks = {}
154 self._plchangecallbacks = {}
155 self._origpl = None
155 self._origpl = None
156 self._mapcls = dirstatemap.dirstatemap
156 self._mapcls = dirstatemap.dirstatemap
157 # Access and cache cwd early, so we don't access it for the first time
157 # Access and cache cwd early, so we don't access it for the first time
158 # after a working-copy update caused it to not exist (accessing it then
158 # after a working-copy update caused it to not exist (accessing it then
159 # raises an exception).
159 # raises an exception).
160 self._cwd
160 self._cwd
161
161
162 def prefetch_parents(self):
162 def prefetch_parents(self):
163 """make sure the parents are loaded
163 """make sure the parents are loaded
164
164
165 Used to avoid a race condition.
165 Used to avoid a race condition.
166 """
166 """
167 self._pl
167 self._pl
168
168
169 @contextlib.contextmanager
169 @contextlib.contextmanager
170 def _changing(self, repo, change_type):
170 def _changing(self, repo, change_type):
171 if repo.currentwlock() is None:
171 if repo.currentwlock() is None:
172 msg = b"trying to change the dirstate without holding the wlock"
172 msg = b"trying to change the dirstate without holding the wlock"
173 raise error.ProgrammingError(msg)
173 raise error.ProgrammingError(msg)
174 if self._invalidated_context:
174 if self._invalidated_context:
175 msg = "trying to use an invalidated dirstate before it has reset"
175 msg = "trying to use an invalidated dirstate before it has reset"
176 raise error.ProgrammingError(msg)
176 raise error.ProgrammingError(msg)
177
177
178 has_tr = repo.currenttransaction() is not None
178 has_tr = repo.currenttransaction() is not None
179
179
180 # different type of change are mutually exclusive
180 # different type of change are mutually exclusive
181 if self._change_type is None:
181 if self._change_type is None:
182 assert self._changing_level == 0
182 assert self._changing_level == 0
183 self._change_type = change_type
183 self._change_type = change_type
184 elif self._change_type != change_type:
184 elif self._change_type != change_type:
185 msg = (
185 msg = (
186 'trying to open "%s" dirstate-changing context while a "%s" is'
186 'trying to open "%s" dirstate-changing context while a "%s" is'
187 ' already open'
187 ' already open'
188 )
188 )
189 msg %= (change_type, self._change_type)
189 msg %= (change_type, self._change_type)
190 raise error.ProgrammingError(msg)
190 raise error.ProgrammingError(msg)
191 self._changing_level += 1
191 self._changing_level += 1
192 try:
192 try:
193 yield
193 yield
194 except: # re-raises
194 except: # re-raises
195 self.invalidate()
195 self.invalidate()
196 raise
196 raise
197 finally:
197 finally:
198 tr = repo.currenttransaction()
198 tr = repo.currenttransaction()
199 if self._changing_level > 0:
199 if self._changing_level > 0:
200 if self._invalidated_context:
200 if self._invalidated_context:
201 # make sure we invalidate anything an upper context might
201 # make sure we invalidate anything an upper context might
202 # have changed.
202 # have changed.
203 self.invalidate()
203 self.invalidate()
204 self._changing_level -= 1
204 self._changing_level -= 1
205 # The invalidation is complete once we exit the final context
205 # The invalidation is complete once we exit the final context
206 # manager
206 # manager
207 if self._changing_level <= 0:
207 if self._changing_level <= 0:
208 self._change_type = None
208 self._change_type = None
209 assert self._changing_level == 0
209 assert self._changing_level == 0
210 if self._invalidated_context:
210 if self._invalidated_context:
211 self._invalidated_context = False
211 self._invalidated_context = False
212 else:
212 else:
213 # When an exception occured, `_invalidated_context`
213 # When an exception occured, `_invalidated_context`
214 # would have been set to True by the `invalidate`
214 # would have been set to True by the `invalidate`
215 # call earlier.
215 # call earlier.
216 #
216 #
217 # We don't have more straightforward code, because the
217 # We don't have more straightforward code, because the
218 # Exception catching (and the associated `invalidate`
218 # Exception catching (and the associated `invalidate`
219 # calling) might have been called by a nested context
219 # calling) might have been called by a nested context
220 # instead of the top level one.
220 # instead of the top level one.
221 self.write(tr)
221 self.write(tr)
222 if has_tr != (tr is not None):
222 if has_tr != (tr is not None):
223 if has_tr:
223 if has_tr:
224 m = "transaction vanished while changing dirstate"
224 m = "transaction vanished while changing dirstate"
225 else:
225 else:
226 m = "transaction appeared while changing dirstate"
226 m = "transaction appeared while changing dirstate"
227 raise error.ProgrammingError(m)
227 raise error.ProgrammingError(m)
228
228
229 @contextlib.contextmanager
229 @contextlib.contextmanager
230 def changing_parents(self, repo):
230 def changing_parents(self, repo):
231 with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
231 with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
232 yield c
232 yield c
233
233
234 @contextlib.contextmanager
234 @contextlib.contextmanager
235 def changing_files(self, repo):
235 def changing_files(self, repo):
236 with self._changing(repo, CHANGE_TYPE_FILES) as c:
236 with self._changing(repo, CHANGE_TYPE_FILES) as c:
237 yield c
237 yield c
238
238
239 # here to help migration to the new code
239 # here to help migration to the new code
240 def parentchange(self):
240 def parentchange(self):
241 msg = (
241 msg = (
242 "Mercurial 6.4 and later requires call to "
242 "Mercurial 6.4 and later requires call to "
243 "`dirstate.changing_parents(repo)`"
243 "`dirstate.changing_parents(repo)`"
244 )
244 )
245 raise error.ProgrammingError(msg)
245 raise error.ProgrammingError(msg)
246
246
247 @property
247 @property
248 def is_changing_any(self):
248 def is_changing_any(self):
249 """Returns true if the dirstate is in the middle of a set of changes.
249 """Returns true if the dirstate is in the middle of a set of changes.
250
250
251 This returns True for any kind of change.
251 This returns True for any kind of change.
252 """
252 """
253 return self._changing_level > 0
253 return self._changing_level > 0
254
254
255 def pendingparentchange(self):
255 def pendingparentchange(self):
256 return self.is_changing_parent()
256 return self.is_changing_parent()
257
257
258 def is_changing_parent(self):
258 def is_changing_parent(self):
259 """Returns true if the dirstate is in the middle of a set of changes
259 """Returns true if the dirstate is in the middle of a set of changes
260 that modify the dirstate parent.
260 that modify the dirstate parent.
261 """
261 """
262 self._ui.deprecwarn(b"dirstate.is_changing_parents", b"6.5")
262 self._ui.deprecwarn(b"dirstate.is_changing_parents", b"6.5")
263 return self.is_changing_parents
263 return self.is_changing_parents
264
264
265 @property
265 @property
266 def is_changing_parents(self):
266 def is_changing_parents(self):
267 """Returns true if the dirstate is in the middle of a set of changes
267 """Returns true if the dirstate is in the middle of a set of changes
268 that modify the dirstate parent.
268 that modify the dirstate parent.
269 """
269 """
270 if self._changing_level <= 0:
270 if self._changing_level <= 0:
271 return False
271 return False
272 return self._change_type == CHANGE_TYPE_PARENTS
272 return self._change_type == CHANGE_TYPE_PARENTS
273
273
274 @property
274 @property
275 def is_changing_files(self):
275 def is_changing_files(self):
276 """Returns true if the dirstate is in the middle of a set of changes
276 """Returns true if the dirstate is in the middle of a set of changes
277 that modify the files tracked or their sources.
277 that modify the files tracked or their sources.
278 """
278 """
279 if self._changing_level <= 0:
279 if self._changing_level <= 0:
280 return False
280 return False
281 return self._change_type == CHANGE_TYPE_FILES
281 return self._change_type == CHANGE_TYPE_FILES
282
282
283 @propertycache
283 @propertycache
284 def _map(self):
284 def _map(self):
285 """Return the dirstate contents (see documentation for dirstatemap)."""
285 """Return the dirstate contents (see documentation for dirstatemap)."""
286 self._map = self._mapcls(
286 self._map = self._mapcls(
287 self._ui,
287 self._ui,
288 self._opener,
288 self._opener,
289 self._root,
289 self._root,
290 self._nodeconstants,
290 self._nodeconstants,
291 self._use_dirstate_v2,
291 self._use_dirstate_v2,
292 )
292 )
293 return self._map
293 return self._map
294
294
295 @property
295 @property
296 def _sparsematcher(self):
296 def _sparsematcher(self):
297 """The matcher for the sparse checkout.
297 """The matcher for the sparse checkout.
298
298
299 The working directory may not include every file from a manifest. The
299 The working directory may not include every file from a manifest. The
300 matcher obtained by this property will match a path if it is to be
300 matcher obtained by this property will match a path if it is to be
301 included in the working directory.
301 included in the working directory.
302
302
303 When sparse if disabled, return None.
303 When sparse if disabled, return None.
304 """
304 """
305 if self._sparsematchfn is None:
305 if self._sparsematchfn is None:
306 return None
306 return None
307 # TODO there is potential to cache this property. For now, the matcher
307 # TODO there is potential to cache this property. For now, the matcher
308 # is resolved on every access. (But the called function does use a
308 # is resolved on every access. (But the called function does use a
309 # cache to keep the lookup fast.)
309 # cache to keep the lookup fast.)
310 return self._sparsematchfn()
310 return self._sparsematchfn()
311
311
312 @repocache(b'branch')
312 @repocache(b'branch')
313 def _branch(self):
313 def _branch(self):
314 try:
314 try:
315 return self._opener.read(b"branch").strip() or b"default"
315 return self._opener.read(b"branch").strip() or b"default"
316 except FileNotFoundError:
316 except FileNotFoundError:
317 return b"default"
317 return b"default"
318
318
319 @property
319 @property
320 def _pl(self):
320 def _pl(self):
321 return self._map.parents()
321 return self._map.parents()
322
322
323 def hasdir(self, d):
323 def hasdir(self, d):
324 return self._map.hastrackeddir(d)
324 return self._map.hastrackeddir(d)
325
325
326 @rootcache(b'.hgignore')
326 @rootcache(b'.hgignore')
327 def _ignore(self):
327 def _ignore(self):
328 files = self._ignorefiles()
328 files = self._ignorefiles()
329 if not files:
329 if not files:
330 return matchmod.never()
330 return matchmod.never()
331
331
332 pats = [b'include:%s' % f for f in files]
332 pats = [b'include:%s' % f for f in files]
333 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
333 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
334
334
335 @propertycache
335 @propertycache
336 def _slash(self):
336 def _slash(self):
337 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
337 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
338
338
339 @propertycache
339 @propertycache
340 def _checklink(self):
340 def _checklink(self):
341 return util.checklink(self._root)
341 return util.checklink(self._root)
342
342
343 @propertycache
343 @propertycache
344 def _checkexec(self):
344 def _checkexec(self):
345 return bool(util.checkexec(self._root))
345 return bool(util.checkexec(self._root))
346
346
347 @propertycache
347 @propertycache
348 def _checkcase(self):
348 def _checkcase(self):
349 return not util.fscasesensitive(self._join(b'.hg'))
349 return not util.fscasesensitive(self._join(b'.hg'))
350
350
351 def _join(self, f):
351 def _join(self, f):
352 # much faster than os.path.join()
352 # much faster than os.path.join()
353 # it's safe because f is always a relative path
353 # it's safe because f is always a relative path
354 return self._rootdir + f
354 return self._rootdir + f
355
355
356 def flagfunc(self, buildfallback):
356 def flagfunc(self, buildfallback):
357 """build a callable that returns flags associated with a filename
357 """build a callable that returns flags associated with a filename
358
358
359 The information is extracted from three possible layers:
359 The information is extracted from three possible layers:
360 1. the file system if it supports the information
360 1. the file system if it supports the information
361 2. the "fallback" information stored in the dirstate if any
361 2. the "fallback" information stored in the dirstate if any
362 3. a more expensive mechanism inferring the flags from the parents.
362 3. a more expensive mechanism inferring the flags from the parents.
363 """
363 """
364
364
365 # small hack to cache the result of buildfallback()
365 # small hack to cache the result of buildfallback()
366 fallback_func = []
366 fallback_func = []
367
367
368 def get_flags(x):
368 def get_flags(x):
369 entry = None
369 entry = None
370 fallback_value = None
370 fallback_value = None
371 try:
371 try:
372 st = os.lstat(self._join(x))
372 st = os.lstat(self._join(x))
373 except OSError:
373 except OSError:
374 return b''
374 return b''
375
375
376 if self._checklink:
376 if self._checklink:
377 if util.statislink(st):
377 if util.statislink(st):
378 return b'l'
378 return b'l'
379 else:
379 else:
380 entry = self.get_entry(x)
380 entry = self.get_entry(x)
381 if entry.has_fallback_symlink:
381 if entry.has_fallback_symlink:
382 if entry.fallback_symlink:
382 if entry.fallback_symlink:
383 return b'l'
383 return b'l'
384 else:
384 else:
385 if not fallback_func:
385 if not fallback_func:
386 fallback_func.append(buildfallback())
386 fallback_func.append(buildfallback())
387 fallback_value = fallback_func[0](x)
387 fallback_value = fallback_func[0](x)
388 if b'l' in fallback_value:
388 if b'l' in fallback_value:
389 return b'l'
389 return b'l'
390
390
391 if self._checkexec:
391 if self._checkexec:
392 if util.statisexec(st):
392 if util.statisexec(st):
393 return b'x'
393 return b'x'
394 else:
394 else:
395 if entry is None:
395 if entry is None:
396 entry = self.get_entry(x)
396 entry = self.get_entry(x)
397 if entry.has_fallback_exec:
397 if entry.has_fallback_exec:
398 if entry.fallback_exec:
398 if entry.fallback_exec:
399 return b'x'
399 return b'x'
400 else:
400 else:
401 if fallback_value is None:
401 if fallback_value is None:
402 if not fallback_func:
402 if not fallback_func:
403 fallback_func.append(buildfallback())
403 fallback_func.append(buildfallback())
404 fallback_value = fallback_func[0](x)
404 fallback_value = fallback_func[0](x)
405 if b'x' in fallback_value:
405 if b'x' in fallback_value:
406 return b'x'
406 return b'x'
407 return b''
407 return b''
408
408
409 return get_flags
409 return get_flags
410
410
411 @propertycache
411 @propertycache
412 def _cwd(self):
412 def _cwd(self):
413 # internal config: ui.forcecwd
413 # internal config: ui.forcecwd
414 forcecwd = self._ui.config(b'ui', b'forcecwd')
414 forcecwd = self._ui.config(b'ui', b'forcecwd')
415 if forcecwd:
415 if forcecwd:
416 return forcecwd
416 return forcecwd
417 return encoding.getcwd()
417 return encoding.getcwd()
418
418
419 def getcwd(self):
419 def getcwd(self):
420 """Return the path from which a canonical path is calculated.
420 """Return the path from which a canonical path is calculated.
421
421
422 This path should be used to resolve file patterns or to convert
422 This path should be used to resolve file patterns or to convert
423 canonical paths back to file paths for display. It shouldn't be
423 canonical paths back to file paths for display. It shouldn't be
424 used to get real file paths. Use vfs functions instead.
424 used to get real file paths. Use vfs functions instead.
425 """
425 """
426 cwd = self._cwd
426 cwd = self._cwd
427 if cwd == self._root:
427 if cwd == self._root:
428 return b''
428 return b''
429 # self._root ends with a path separator if self._root is '/' or 'C:\'
429 # self._root ends with a path separator if self._root is '/' or 'C:\'
430 rootsep = self._root
430 rootsep = self._root
431 if not util.endswithsep(rootsep):
431 if not util.endswithsep(rootsep):
432 rootsep += pycompat.ossep
432 rootsep += pycompat.ossep
433 if cwd.startswith(rootsep):
433 if cwd.startswith(rootsep):
434 return cwd[len(rootsep) :]
434 return cwd[len(rootsep) :]
435 else:
435 else:
436 # we're outside the repo. return an absolute path.
436 # we're outside the repo. return an absolute path.
437 return cwd
437 return cwd
438
438
439 def pathto(self, f, cwd=None):
439 def pathto(self, f, cwd=None):
440 if cwd is None:
440 if cwd is None:
441 cwd = self.getcwd()
441 cwd = self.getcwd()
442 path = util.pathto(self._root, cwd, f)
442 path = util.pathto(self._root, cwd, f)
443 if self._slash:
443 if self._slash:
444 return util.pconvert(path)
444 return util.pconvert(path)
445 return path
445 return path
446
446
447 def get_entry(self, path):
447 def get_entry(self, path):
448 """return a DirstateItem for the associated path"""
448 """return a DirstateItem for the associated path"""
449 entry = self._map.get(path)
449 entry = self._map.get(path)
450 if entry is None:
450 if entry is None:
451 return DirstateItem()
451 return DirstateItem()
452 return entry
452 return entry
453
453
454 def __contains__(self, key):
454 def __contains__(self, key):
455 return key in self._map
455 return key in self._map
456
456
457 def __iter__(self):
457 def __iter__(self):
458 return iter(sorted(self._map))
458 return iter(sorted(self._map))
459
459
460 def items(self):
460 def items(self):
461 return self._map.items()
461 return self._map.items()
462
462
463 iteritems = items
463 iteritems = items
464
464
465 def parents(self):
465 def parents(self):
466 return [self._validate(p) for p in self._pl]
466 return [self._validate(p) for p in self._pl]
467
467
468 def p1(self):
468 def p1(self):
469 return self._validate(self._pl[0])
469 return self._validate(self._pl[0])
470
470
471 def p2(self):
471 def p2(self):
472 return self._validate(self._pl[1])
472 return self._validate(self._pl[1])
473
473
474 @property
474 @property
475 def in_merge(self):
475 def in_merge(self):
476 """True if a merge is in progress"""
476 """True if a merge is in progress"""
477 return self._pl[1] != self._nodeconstants.nullid
477 return self._pl[1] != self._nodeconstants.nullid
478
478
479 def branch(self):
479 def branch(self):
480 return encoding.tolocal(self._branch)
480 return encoding.tolocal(self._branch)
481
481
482 # XXX since this make the dirstate dirty, we should enforce that it is done
482 @requires_changing_parents
483 # withing an appropriate change-context that scope the change and ensure it
484 # eventually get written on disk (or rolled back)
485 def setparents(self, p1, p2=None):
483 def setparents(self, p1, p2=None):
486 """Set dirstate parents to p1 and p2.
484 """Set dirstate parents to p1 and p2.
487
485
488 When moving from two parents to one, "merged" entries a
486 When moving from two parents to one, "merged" entries a
489 adjusted to normal and previous copy records discarded and
487 adjusted to normal and previous copy records discarded and
490 returned by the call.
488 returned by the call.
491
489
492 See localrepo.setparents()
490 See localrepo.setparents()
493 """
491 """
494 if p2 is None:
492 if p2 is None:
495 p2 = self._nodeconstants.nullid
493 p2 = self._nodeconstants.nullid
496 if self._changing_level == 0:
494 if self._changing_level == 0:
497 raise ValueError(
495 raise ValueError(
498 b"cannot set dirstate parent outside of "
496 b"cannot set dirstate parent outside of "
499 b"dirstate.changing_parents context manager"
497 b"dirstate.changing_parents context manager"
500 )
498 )
501
499
502 self._dirty = True
500 self._dirty = True
503 oldp2 = self._pl[1]
501 oldp2 = self._pl[1]
504 if self._origpl is None:
502 if self._origpl is None:
505 self._origpl = self._pl
503 self._origpl = self._pl
506 nullid = self._nodeconstants.nullid
504 nullid = self._nodeconstants.nullid
507 # True if we need to fold p2 related state back to a linear case
505 # True if we need to fold p2 related state back to a linear case
508 fold_p2 = oldp2 != nullid and p2 == nullid
506 fold_p2 = oldp2 != nullid and p2 == nullid
509 return self._map.setparents(p1, p2, fold_p2=fold_p2)
507 return self._map.setparents(p1, p2, fold_p2=fold_p2)
510
508
511 def setbranch(self, branch):
509 def setbranch(self, branch):
512 self.__class__._branch.set(self, encoding.fromlocal(branch))
510 self.__class__._branch.set(self, encoding.fromlocal(branch))
513 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
511 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
514 try:
512 try:
515 f.write(self._branch + b'\n')
513 f.write(self._branch + b'\n')
516 f.close()
514 f.close()
517
515
518 # make sure filecache has the correct stat info for _branch after
516 # make sure filecache has the correct stat info for _branch after
519 # replacing the underlying file
517 # replacing the underlying file
520 ce = self._filecache[b'_branch']
518 ce = self._filecache[b'_branch']
521 if ce:
519 if ce:
522 ce.refresh()
520 ce.refresh()
523 except: # re-raises
521 except: # re-raises
524 f.discard()
522 f.discard()
525 raise
523 raise
526
524
527 def invalidate(self):
525 def invalidate(self):
528 """Causes the next access to reread the dirstate.
526 """Causes the next access to reread the dirstate.
529
527
530 This is different from localrepo.invalidatedirstate() because it always
528 This is different from localrepo.invalidatedirstate() because it always
531 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
529 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
532 check whether the dirstate has changed before rereading it."""
530 check whether the dirstate has changed before rereading it."""
533
531
534 for a in ("_map", "_branch", "_ignore"):
532 for a in ("_map", "_branch", "_ignore"):
535 if a in self.__dict__:
533 if a in self.__dict__:
536 delattr(self, a)
534 delattr(self, a)
537 self._dirty = False
535 self._dirty = False
538 self._dirty_tracked_set = False
536 self._dirty_tracked_set = False
539 self._invalidated_context = self._changing_level > 0
537 self._invalidated_context = self._changing_level > 0
540 self._origpl = None
538 self._origpl = None
541
539
542 # XXX since this make the dirstate dirty, we should enforce that it is done
540 # XXX since this make the dirstate dirty, we should enforce that it is done
543 # withing an appropriate change-context that scope the change and ensure it
541 # withing an appropriate change-context that scope the change and ensure it
544 # eventually get written on disk (or rolled back)
542 # eventually get written on disk (or rolled back)
545 def copy(self, source, dest):
543 def copy(self, source, dest):
546 """Mark dest as a copy of source. Unmark dest if source is None."""
544 """Mark dest as a copy of source. Unmark dest if source is None."""
547 if source == dest:
545 if source == dest:
548 return
546 return
549 self._dirty = True
547 self._dirty = True
550 if source is not None:
548 if source is not None:
551 self._check_sparse(source)
549 self._check_sparse(source)
552 self._map.copymap[dest] = source
550 self._map.copymap[dest] = source
553 else:
551 else:
554 self._map.copymap.pop(dest, None)
552 self._map.copymap.pop(dest, None)
555
553
556 def copied(self, file):
554 def copied(self, file):
557 return self._map.copymap.get(file, None)
555 return self._map.copymap.get(file, None)
558
556
559 def copies(self):
557 def copies(self):
560 return self._map.copymap
558 return self._map.copymap
561
559
    @requires_changing_files
    def set_tracked(self, filename, reset_copy=False):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        if reset_copy is set, any existing copy information will be dropped.

        return True the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            # a name that is newly becoming tracked must pass the usual
            # validation (clashes with tracked dirs/files, sparse profile)
            self._check_new_tracked_filename(filename)
        # NOTE(review): despite the name, ``pre_tracked`` holds whatever
        # ``_map.set_tracked`` returns — given the docstring it presumably
        # means "was previously untracked"; confirm against dirstatemap.
        pre_tracked = self._map.set_tracked(filename)
        if reset_copy:
            self._map.copymap.pop(filename, None)
        if pre_tracked:
            # the set of tracked files changed: the tracked-hint must move
            self._dirty_tracked_set = True
        return pre_tracked
583
581
584 @requires_changing_files
582 @requires_changing_files
585 def set_untracked(self, filename):
583 def set_untracked(self, filename):
586 """a "public" method for generic code to mark a file as untracked
584 """a "public" method for generic code to mark a file as untracked
587
585
588 This function is to be called outside of "update/merge" case. For
586 This function is to be called outside of "update/merge" case. For
589 example by a command like `hg remove X`.
587 example by a command like `hg remove X`.
590
588
591 return True the file was previously tracked, False otherwise.
589 return True the file was previously tracked, False otherwise.
592 """
590 """
593 ret = self._map.set_untracked(filename)
591 ret = self._map.set_untracked(filename)
594 if ret:
592 if ret:
595 self._dirty = True
593 self._dirty = True
596 self._dirty_tracked_set = True
594 self._dirty_tracked_set = True
597 return ret
595 return ret
598
596
599 @requires_not_changing_parents
597 @requires_not_changing_parents
600 def set_clean(self, filename, parentfiledata):
598 def set_clean(self, filename, parentfiledata):
601 """record that the current state of the file on disk is known to be clean"""
599 """record that the current state of the file on disk is known to be clean"""
602 self._dirty = True
600 self._dirty = True
603 if not self._map[filename].tracked:
601 if not self._map[filename].tracked:
604 self._check_new_tracked_filename(filename)
602 self._check_new_tracked_filename(filename)
605 (mode, size, mtime) = parentfiledata
603 (mode, size, mtime) = parentfiledata
606 self._map.set_clean(filename, mode, size, mtime)
604 self._map.set_clean(filename, mode, size, mtime)
607
605
608 @requires_not_changing_parents
606 @requires_not_changing_parents
609 def set_possibly_dirty(self, filename):
607 def set_possibly_dirty(self, filename):
610 """record that the current state of the file on disk is unknown"""
608 """record that the current state of the file on disk is unknown"""
611 self._dirty = True
609 self._dirty = True
612 self._map.set_possibly_dirty(filename)
610 self._map.set_possibly_dirty(filename)
613
611
    @requires_changing_parents
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.changing_parents(repo):` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
        )
653
651
654 @requires_changing_parents
652 @requires_changing_parents
655 def update_file(
653 def update_file(
656 self,
654 self,
657 filename,
655 filename,
658 wc_tracked,
656 wc_tracked,
659 p1_tracked,
657 p1_tracked,
660 p2_info=False,
658 p2_info=False,
661 possibly_dirty=False,
659 possibly_dirty=False,
662 parentfiledata=None,
660 parentfiledata=None,
663 ):
661 ):
664 """update the information about a file in the dirstate
662 """update the information about a file in the dirstate
665
663
666 This is to be called when the direstates parent changes to keep track
664 This is to be called when the direstates parent changes to keep track
667 of what is the file situation in regards to the working copy and its parent.
665 of what is the file situation in regards to the working copy and its parent.
668
666
669 This function must be called within a `dirstate.changing_parents` context.
667 This function must be called within a `dirstate.changing_parents` context.
670
668
671 note: the API is at an early stage and we might need to adjust it
669 note: the API is at an early stage and we might need to adjust it
672 depending of what information ends up being relevant and useful to
670 depending of what information ends up being relevant and useful to
673 other processing.
671 other processing.
674 """
672 """
675 self._update_file(
673 self._update_file(
676 filename=filename,
674 filename=filename,
677 wc_tracked=wc_tracked,
675 wc_tracked=wc_tracked,
678 p1_tracked=p1_tracked,
676 p1_tracked=p1_tracked,
679 p2_info=p2_info,
677 p2_info=p2_info,
680 possibly_dirty=possibly_dirty,
678 possibly_dirty=possibly_dirty,
681 parentfiledata=parentfiledata,
679 parentfiledata=parentfiledata,
682 )
680 )
683
681
    # XXX since this makes the dirstate dirty, we should enforce that it is
    # done within an appropriate change-context that scopes the change and
    # ensures it eventually gets written on disk (or rolled back)
    def hacky_extension_update_file(self, *args, **kwargs):
        """NEVER USE THIS, YOU DO NOT NEED IT

        This function is a variant of "update_file" to be called by a small
        set of extensions; it also adjusts the internal state of a file, but
        can be called outside a `changing_parents` context.

        A very small number of extensions meddle with the working copy content
        in a way that requires adjusting the dirstate accordingly. At the time
        this function was written they are:
        - keyword,
        - largefile,
        PLEASE DO NOT GROW THIS LIST ANY FURTHER.

        This function could probably be replaced by a more semantic one (like
        "adjust expected size" or "always revalidate file content", etc),
        however at the time of writing this is too much of a detour to be
        considered.
        """
        # same arguments as _update_file / update_file
        self._update_file(
            *args,
            **kwargs,
        )
710
708
    def _update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """Shared implementation behind ``update_file`` and
        ``hacky_extension_update_file``: reset the map entry for ``filename``
        and raise the relevant dirty flags."""

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        old_entry = self._map.get(filename)
        if old_entry is None:
            prev_tracked = False
        else:
            prev_tracked = old_entry.tracked
        if prev_tracked != wc_tracked:
            # tracked-set membership changed: the tracked-hint must be rewritten
            self._dirty_tracked_set = True

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
742
740
    def _check_new_tracked_filename(self, filename):
        """Sanity-check ``filename`` before it becomes tracked.

        Aborts if the name itself is invalid, if a tracked directory of the
        same name exists, if the file would clash with an existing non-removed
        dirstate entry along its path, or if it falls outside the sparse
        profile.
        """
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
        self._check_sparse(filename)
759
757
760 def _check_sparse(self, filename):
758 def _check_sparse(self, filename):
761 """Check that a filename is inside the sparse profile"""
759 """Check that a filename is inside the sparse profile"""
762 sparsematch = self._sparsematcher
760 sparsematch = self._sparsematcher
763 if sparsematch is not None and not sparsematch.always():
761 if sparsematch is not None and not sparsematch.always():
764 if not sparsematch(filename):
762 if not sparsematch(filename):
765 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
763 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
766 hint = _(
764 hint = _(
767 b'include file with `hg debugsparse --include <pattern>` or use '
765 b'include file with `hg debugsparse --include <pattern>` or use '
768 b'`hg add -s <file>` to include file directory while adding'
766 b'`hg add -s <file>` to include file directory while adding'
769 )
767 )
770 raise error.Abort(msg % filename, hint=hint)
768 raise error.Abort(msg % filename, hint=hint)
771
769
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Resolve the on-disk case of ``path`` and cache it in ``storemap``.

        ``normed`` is the case-normalized form of ``path``; ``exists`` may be
        passed by the caller to avoid a filesystem check, or None to look it
        up here.  Results for existing paths are memoized into ``storemap``
        (a file or directory fold map).
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist on disk
            storemap[normed] = folded

        return folded
797
795
798 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
796 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
799 normed = util.normcase(path)
797 normed = util.normcase(path)
800 folded = self._map.filefoldmap.get(normed, None)
798 folded = self._map.filefoldmap.get(normed, None)
801 if folded is None:
799 if folded is None:
802 if isknown:
800 if isknown:
803 folded = path
801 folded = path
804 else:
802 else:
805 folded = self._discoverpath(
803 folded = self._discoverpath(
806 path, normed, ignoremissing, exists, self._map.filefoldmap
804 path, normed, ignoremissing, exists, self._map.filefoldmap
807 )
805 )
808 return folded
806 return folded
809
807
810 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
808 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
811 normed = util.normcase(path)
809 normed = util.normcase(path)
812 folded = self._map.filefoldmap.get(normed, None)
810 folded = self._map.filefoldmap.get(normed, None)
813 if folded is None:
811 if folded is None:
814 folded = self._map.dirfoldmap.get(normed, None)
812 folded = self._map.dirfoldmap.get(normed, None)
815 if folded is None:
813 if folded is None:
816 if isknown:
814 if isknown:
817 folded = path
815 folded = path
818 else:
816 else:
819 # store discovered result in dirfoldmap so that future
817 # store discovered result in dirfoldmap so that future
820 # normalizefile calls don't start matching directories
818 # normalizefile calls don't start matching directories
821 folded = self._discoverpath(
819 folded = self._discoverpath(
822 path, normed, ignoremissing, exists, self._map.dirfoldmap
820 path, normed, ignoremissing, exists, self._map.dirfoldmap
823 )
821 )
824 return folded
822 return folded
825
823
826 def normalize(self, path, isknown=False, ignoremissing=False):
824 def normalize(self, path, isknown=False, ignoremissing=False):
827 """
825 """
828 normalize the case of a pathname when on a casefolding filesystem
826 normalize the case of a pathname when on a casefolding filesystem
829
827
830 isknown specifies whether the filename came from walking the
828 isknown specifies whether the filename came from walking the
831 disk, to avoid extra filesystem access.
829 disk, to avoid extra filesystem access.
832
830
833 If ignoremissing is True, missing path are returned
831 If ignoremissing is True, missing path are returned
834 unchanged. Otherwise, we try harder to normalize possibly
832 unchanged. Otherwise, we try harder to normalize possibly
835 existing path components.
833 existing path components.
836
834
837 The normalized case is determined based on the following precedence:
835 The normalized case is determined based on the following precedence:
838
836
839 - version of name already stored in the dirstate
837 - version of name already stored in the dirstate
840 - version of name stored on disk
838 - version of name stored on disk
841 - version provided via command arguments
839 - version provided via command arguments
842 """
840 """
843
841
844 if self._checkcase:
842 if self._checkcase:
845 return self._normalize(path, isknown, ignoremissing)
843 return self._normalize(path, isknown, ignoremissing)
846 return path
844 return path
847
845
    # XXX since this makes the dirstate dirty, we should enforce that it is
    # done within an appropriate change-context that scopes the change and
    # ensures it eventually gets written on disk (or rolled back)
    def clear(self):
        """Drop every entry from the dirstate map and mark the dirstate dirty."""
        self._map.clear()
        self._dirty = True
854
852
    # XXX since this makes the dirstate dirty, we should enforce that it is
    # done within an appropriate change-context that scopes the change and
    # ensures it eventually gets written on disk (or rolled back)
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild the dirstate to track ``allfiles`` with ``parent`` as p1.

        When ``changedfiles`` is None the whole dirstate is rebuilt from
        scratch; otherwise only those files are re-looked-up (if present in
        ``allfiles``) or dropped (if not).  Files outside the sparse profile
        are filtered out first.
        """
        matcher = self._sparsematcher
        if matcher is not None and not matcher.always():
            # should not add non-matching files
            allfiles = [f for f in allfiles if matcher(f)]
            if changedfiles:
                changedfiles = [f for f in changedfiles if matcher(f)]

            if changedfiles is not None:
                # these files will be deleted from the dirstate when they are
                # not found to be in allfiles
                dirstatefilestoremove = {f for f in self if not matcher(f)}
                changedfiles = dirstatefilestoremove.union(changedfiles)

        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            self.clear()
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            # remember the previous parents so write() can notify callbacks
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
909
907
    def identity(self):
        """Return the identity of the dirstate itself, to detect storage changes.

        If the identity recorded when a previous dirstate was read equals
        this one, writing changes based on that former dirstate out can keep
        consistency.
        """
        return self._map.identity
917
915
918 def write(self, tr):
916 def write(self, tr):
919 if not self._dirty:
917 if not self._dirty:
920 return
918 return
921
919
922 write_key = self._use_tracked_hint and self._dirty_tracked_set
920 write_key = self._use_tracked_hint and self._dirty_tracked_set
923 if tr:
921 if tr:
924 # make sure we invalidate the current change on abort
922 # make sure we invalidate the current change on abort
925 if tr is not None:
923 if tr is not None:
926 tr.addabort(
924 tr.addabort(
927 b'dirstate-invalidate',
925 b'dirstate-invalidate',
928 lambda tr: self.invalidate(),
926 lambda tr: self.invalidate(),
929 )
927 )
930 # delay writing in-memory changes out
928 # delay writing in-memory changes out
931 tr.addfilegenerator(
929 tr.addfilegenerator(
932 b'dirstate-1-main',
930 b'dirstate-1-main',
933 (self._filename,),
931 (self._filename,),
934 lambda f: self._writedirstate(tr, f),
932 lambda f: self._writedirstate(tr, f),
935 location=b'plain',
933 location=b'plain',
936 post_finalize=True,
934 post_finalize=True,
937 )
935 )
938 if write_key:
936 if write_key:
939 tr.addfilegenerator(
937 tr.addfilegenerator(
940 b'dirstate-2-key-post',
938 b'dirstate-2-key-post',
941 (self._filename_th,),
939 (self._filename_th,),
942 lambda f: self._write_tracked_hint(tr, f),
940 lambda f: self._write_tracked_hint(tr, f),
943 location=b'plain',
941 location=b'plain',
944 post_finalize=True,
942 post_finalize=True,
945 )
943 )
946 return
944 return
947
945
948 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
946 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
949 with file(self._filename) as f:
947 with file(self._filename) as f:
950 self._writedirstate(tr, f)
948 self._writedirstate(tr, f)
951 if write_key:
949 if write_key:
952 # we update the key-file after writing to make sure reader have a
950 # we update the key-file after writing to make sure reader have a
953 # key that match the newly written content
951 # key that match the newly written content
954 with file(self._filename_th) as f:
952 with file(self._filename_th) as f:
955 self._write_tracked_hint(tr, f)
953 self._write_tracked_hint(tr, f)
956
954
957 def delete_tracked_hint(self):
955 def delete_tracked_hint(self):
958 """remove the tracked_hint file
956 """remove the tracked_hint file
959
957
960 To be used by format downgrades operation"""
958 To be used by format downgrades operation"""
961 self._opener.unlink(self._filename_th)
959 self._opener.unlink(self._filename_th)
962 self._use_tracked_hint = False
960 self._use_tracked_hint = False
963
961
964 def addparentchangecallback(self, category, callback):
962 def addparentchangecallback(self, category, callback):
965 """add a callback to be called when the wd parents are changed
963 """add a callback to be called when the wd parents are changed
966
964
967 Callback will be called with the following arguments:
965 Callback will be called with the following arguments:
968 dirstate, (oldp1, oldp2), (newp1, newp2)
966 dirstate, (oldp1, oldp2), (newp1, newp2)
969
967
970 Category is a unique identifier to allow overwriting an old callback
968 Category is a unique identifier to allow overwriting an old callback
971 with a newer callback.
969 with a newer callback.
972 """
970 """
973 self._plchangecallbacks[category] = callback
971 self._plchangecallbacks[category] = callback
974
972
    def _writedirstate(self, tr, st):
        """Serialize the dirstate map to the open file object ``st``.

        Runs the registered parent-change callbacks first (when the parents
        moved since they were last recorded), then clears the dirty flags.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            # sorted() keeps the callback invocation order deterministic
            for c, callback in sorted(self._plchangecallbacks.items()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        self._map.write(tr, st)
        self._dirty = False
        self._dirty_tracked_set = False
984
982
    def _write_tracked_hint(self, tr, f):
        """Write a fresh random key to the tracked-hint file ``f``.

        A changed key signals to readers that the set of tracked files may
        have changed (see ``write``).  ``tr`` is unused here — presumably
        kept for signature symmetry with the other write helpers; confirm.
        """
        key = node.hex(uuid.uuid4().bytes)
        f.write(b"1\n%s\n" % key)  # 1 is the format version
988
986
989 def _dirignore(self, f):
987 def _dirignore(self, f):
990 if self._ignore(f):
988 if self._ignore(f):
991 return True
989 return True
992 for p in pathutil.finddirs(f):
990 for p in pathutil.finddirs(f):
993 if self._ignore(p):
991 if self._ignore(p):
994 return True
992 return True
995 return False
993 return False
996
994
    def _ignorefiles(self):
        """Return the list of ignore-file paths to consult.

        Includes the repository's ``.hgignore`` (when present) plus every
        ``ui.ignore`` / ``ui.ignore.*`` path from the configuration.
        """
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files
1007
1005
    def _ignorefileandline(self, f):
        """Return ``(ignorefile, lineno, line)`` for the rule ignoring ``f``.

        Walks every ignore file (following ``subinclude`` patterns, each file
        queued at most once) and returns the source of the first pattern that
        matches ``f``; returns ``(None, -1, b"")`` when nothing matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # defer to the referenced file unless already processed
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
1029
1027
1030 def _walkexplicit(self, match, subrepos):
1028 def _walkexplicit(self, match, subrepos):
1031 """Get stat data about the files explicitly specified by match.
1029 """Get stat data about the files explicitly specified by match.
1032
1030
1033 Return a triple (results, dirsfound, dirsnotfound).
1031 Return a triple (results, dirsfound, dirsnotfound).
1034 - results is a mapping from filename to stat result. It also contains
1032 - results is a mapping from filename to stat result. It also contains
1035 listings mapping subrepos and .hg to None.
1033 listings mapping subrepos and .hg to None.
1036 - dirsfound is a list of files found to be directories.
1034 - dirsfound is a list of files found to be directories.
1037 - dirsnotfound is a list of files that the dirstate thinks are
1035 - dirsnotfound is a list of files that the dirstate thinks are
1038 directories and that were not found."""
1036 directories and that were not found."""
1039
1037
1040 def badtype(mode):
1038 def badtype(mode):
1041 kind = _(b'unknown')
1039 kind = _(b'unknown')
1042 if stat.S_ISCHR(mode):
1040 if stat.S_ISCHR(mode):
1043 kind = _(b'character device')
1041 kind = _(b'character device')
1044 elif stat.S_ISBLK(mode):
1042 elif stat.S_ISBLK(mode):
1045 kind = _(b'block device')
1043 kind = _(b'block device')
1046 elif stat.S_ISFIFO(mode):
1044 elif stat.S_ISFIFO(mode):
1047 kind = _(b'fifo')
1045 kind = _(b'fifo')
1048 elif stat.S_ISSOCK(mode):
1046 elif stat.S_ISSOCK(mode):
1049 kind = _(b'socket')
1047 kind = _(b'socket')
1050 elif stat.S_ISDIR(mode):
1048 elif stat.S_ISDIR(mode):
1051 kind = _(b'directory')
1049 kind = _(b'directory')
1052 return _(b'unsupported file type (type is %s)') % kind
1050 return _(b'unsupported file type (type is %s)') % kind
1053
1051
1054 badfn = match.bad
1052 badfn = match.bad
1055 dmap = self._map
1053 dmap = self._map
1056 lstat = os.lstat
1054 lstat = os.lstat
1057 getkind = stat.S_IFMT
1055 getkind = stat.S_IFMT
1058 dirkind = stat.S_IFDIR
1056 dirkind = stat.S_IFDIR
1059 regkind = stat.S_IFREG
1057 regkind = stat.S_IFREG
1060 lnkkind = stat.S_IFLNK
1058 lnkkind = stat.S_IFLNK
1061 join = self._join
1059 join = self._join
1062 dirsfound = []
1060 dirsfound = []
1063 foundadd = dirsfound.append
1061 foundadd = dirsfound.append
1064 dirsnotfound = []
1062 dirsnotfound = []
1065 notfoundadd = dirsnotfound.append
1063 notfoundadd = dirsnotfound.append
1066
1064
1067 if not match.isexact() and self._checkcase:
1065 if not match.isexact() and self._checkcase:
1068 normalize = self._normalize
1066 normalize = self._normalize
1069 else:
1067 else:
1070 normalize = None
1068 normalize = None
1071
1069
1072 files = sorted(match.files())
1070 files = sorted(match.files())
1073 subrepos.sort()
1071 subrepos.sort()
1074 i, j = 0, 0
1072 i, j = 0, 0
1075 while i < len(files) and j < len(subrepos):
1073 while i < len(files) and j < len(subrepos):
1076 subpath = subrepos[j] + b"/"
1074 subpath = subrepos[j] + b"/"
1077 if files[i] < subpath:
1075 if files[i] < subpath:
1078 i += 1
1076 i += 1
1079 continue
1077 continue
1080 while i < len(files) and files[i].startswith(subpath):
1078 while i < len(files) and files[i].startswith(subpath):
1081 del files[i]
1079 del files[i]
1082 j += 1
1080 j += 1
1083
1081
1084 if not files or b'' in files:
1082 if not files or b'' in files:
1085 files = [b'']
1083 files = [b'']
1086 # constructing the foldmap is expensive, so don't do it for the
1084 # constructing the foldmap is expensive, so don't do it for the
1087 # common case where files is ['']
1085 # common case where files is ['']
1088 normalize = None
1086 normalize = None
1089 results = dict.fromkeys(subrepos)
1087 results = dict.fromkeys(subrepos)
1090 results[b'.hg'] = None
1088 results[b'.hg'] = None
1091
1089
1092 for ff in files:
1090 for ff in files:
1093 if normalize:
1091 if normalize:
1094 nf = normalize(ff, False, True)
1092 nf = normalize(ff, False, True)
1095 else:
1093 else:
1096 nf = ff
1094 nf = ff
1097 if nf in results:
1095 if nf in results:
1098 continue
1096 continue
1099
1097
1100 try:
1098 try:
1101 st = lstat(join(nf))
1099 st = lstat(join(nf))
1102 kind = getkind(st.st_mode)
1100 kind = getkind(st.st_mode)
1103 if kind == dirkind:
1101 if kind == dirkind:
1104 if nf in dmap:
1102 if nf in dmap:
1105 # file replaced by dir on disk but still in dirstate
1103 # file replaced by dir on disk but still in dirstate
1106 results[nf] = None
1104 results[nf] = None
1107 foundadd((nf, ff))
1105 foundadd((nf, ff))
1108 elif kind == regkind or kind == lnkkind:
1106 elif kind == regkind or kind == lnkkind:
1109 results[nf] = st
1107 results[nf] = st
1110 else:
1108 else:
1111 badfn(ff, badtype(kind))
1109 badfn(ff, badtype(kind))
1112 if nf in dmap:
1110 if nf in dmap:
1113 results[nf] = None
1111 results[nf] = None
1114 except (OSError) as inst:
1112 except (OSError) as inst:
1115 # nf not found on disk - it is dirstate only
1113 # nf not found on disk - it is dirstate only
1116 if nf in dmap: # does it exactly match a missing file?
1114 if nf in dmap: # does it exactly match a missing file?
1117 results[nf] = None
1115 results[nf] = None
1118 else: # does it match a missing directory?
1116 else: # does it match a missing directory?
1119 if self._map.hasdir(nf):
1117 if self._map.hasdir(nf):
1120 notfoundadd(nf)
1118 notfoundadd(nf)
1121 else:
1119 else:
1122 badfn(ff, encoding.strtolocal(inst.strerror))
1120 badfn(ff, encoding.strtolocal(inst.strerror))
1123
1121
1124 # match.files() may contain explicitly-specified paths that shouldn't
1122 # match.files() may contain explicitly-specified paths that shouldn't
1125 # be taken; drop them from the list of files found. dirsfound/notfound
1123 # be taken; drop them from the list of files found. dirsfound/notfound
1126 # aren't filtered here because they will be tested later.
1124 # aren't filtered here because they will be tested later.
1127 if match.anypats():
1125 if match.anypats():
1128 for f in list(results):
1126 for f in list(results):
1129 if f == b'.hg' or f in subrepos:
1127 if f == b'.hg' or f in subrepos:
1130 # keep sentinel to disable further out-of-repo walks
1128 # keep sentinel to disable further out-of-repo walks
1131 continue
1129 continue
1132 if not match(f):
1130 if not match(f):
1133 del results[f]
1131 del results[f]
1134
1132
1135 # Case insensitive filesystems cannot rely on lstat() failing to detect
1133 # Case insensitive filesystems cannot rely on lstat() failing to detect
1136 # a case-only rename. Prune the stat object for any file that does not
1134 # a case-only rename. Prune the stat object for any file that does not
1137 # match the case in the filesystem, if there are multiple files that
1135 # match the case in the filesystem, if there are multiple files that
1138 # normalize to the same path.
1136 # normalize to the same path.
1139 if match.isexact() and self._checkcase:
1137 if match.isexact() and self._checkcase:
1140 normed = {}
1138 normed = {}
1141
1139
1142 for f, st in results.items():
1140 for f, st in results.items():
1143 if st is None:
1141 if st is None:
1144 continue
1142 continue
1145
1143
1146 nc = util.normcase(f)
1144 nc = util.normcase(f)
1147 paths = normed.get(nc)
1145 paths = normed.get(nc)
1148
1146
1149 if paths is None:
1147 if paths is None:
1150 paths = set()
1148 paths = set()
1151 normed[nc] = paths
1149 normed[nc] = paths
1152
1150
1153 paths.add(f)
1151 paths.add(f)
1154
1152
1155 for norm, paths in normed.items():
1153 for norm, paths in normed.items():
1156 if len(paths) > 1:
1154 if len(paths) > 1:
1157 for path in paths:
1155 for path in paths:
1158 folded = self._discoverpath(
1156 folded = self._discoverpath(
1159 path, norm, True, None, self._map.dirfoldmap
1157 path, norm, True, None, self._map.dirfoldmap
1160 )
1158 )
1161 if path != folded:
1159 if path != folded:
1162 results[path] = None
1160 results[path] = None
1163
1161
1164 return results, dirsfound, dirsnotfound
1162 return results, dirsfound, dirsnotfound
1165
1163
def walk(self, match, subrepos, unknown, ignored, full=True):
    """
    Walk recursively through the directory tree, finding all files
    matched by match.

    If full is False, maybe skip some known-clean files.

    Return a dict mapping filename to stat-like object (either
    mercurial.osutil.stat instance or return value of os.stat()).

    """
    # full is a flag that extensions that hook into walk can use -- this
    # implementation doesn't use it at all. This satisfies the contract
    # because we only guarantee a "maybe".

    # Pick the ignore predicates depending on which file states the caller
    # wants listed.
    if ignored:
        ignore = util.never
        dirignore = util.never
    elif unknown:
        ignore = self._ignore
        dirignore = self._dirignore
    else:
        # if not unknown and not ignored, drop dir recursion and step 2
        ignore = util.always
        dirignore = util.always

    # In a sparse checkout, restrict the walk to the sparse set plus any
    # explicitly named files.
    if self._sparsematchfn is not None:
        em = matchmod.exact(match.files())
        sm = matchmod.unionmatcher([self._sparsematcher, em])
        match = matchmod.intersectmatchers(match, sm)

    # Bind hot attribute lookups to locals for the traversal loops below.
    matchfn = match.matchfn
    matchalways = match.always()
    matchtdir = match.traversedir
    dmap = self._map
    listdir = util.listdir
    lstat = os.lstat
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self._join

    exact = skipstep3 = False
    if match.isexact():  # match.exact
        exact = True
        dirignore = util.always  # skip step 2
    elif match.prefix():  # match.match, no patterns
        skipstep3 = True

    if not exact and self._checkcase:
        normalize = self._normalize
        normalizefile = self._normalizefile
        skipstep3 = False
    else:
        normalize = self._normalize
        normalizefile = None

    # step 1: find all explicit files
    results, work, dirsnotfound = self._walkexplicit(match, subrepos)
    if matchtdir:
        for d in work:
            matchtdir(d[0])
        for d in dirsnotfound:
            matchtdir(d)

    skipstep3 = skipstep3 and not (work or dirsnotfound)
    work = [d for d in work if not dirignore(d[0])]

    # step 2: visit subdirectories
    def traverse(work, alreadynormed):
        wadd = work.append
        while work:
            tracing.counter('dirstate.walk work', len(work))
            nd = work.pop()
            visitentries = match.visitchildrenset(nd)
            if not visitentries:
                continue
            if visitentries == b'this' or visitentries == b'all':
                visitentries = None
            skip = None
            if nd != b'':
                # never descend into the repository's own metadata dir
                skip = b'.hg'
            try:
                with tracing.log('dirstate.walk.traverse listdir %s', nd):
                    entries = listdir(join(nd), stat=True, skip=skip)
            except (PermissionError, FileNotFoundError) as inst:
                match.bad(
                    self.pathto(nd), encoding.strtolocal(inst.strerror)
                )
                continue
            for f, kind, st in entries:
                # Some matchers may return files in the visitentries set,
                # instead of 'this', if the matcher explicitly mentions them
                # and is not an exactmatcher. This is acceptable; we do not
                # make any hard assumptions about file-or-directory below
                # based on the presence of `f` in visitentries. If
                # visitchildrenset returned a set, we can always skip the
                # entries *not* in the set it provided regardless of whether
                # they're actually a file or a directory.
                if visitentries and f not in visitentries:
                    continue
                if normalizefile:
                    # even though f might be a directory, we're only
                    # interested in comparing it to files currently in the
                    # dmap -- therefore normalizefile is enough
                    nf = normalizefile(
                        nd and (nd + b"/" + f) or f, True, True
                    )
                else:
                    nf = nd and (nd + b"/" + f) or f
                if nf not in results:
                    if kind == dirkind:
                        if not ignore(nf):
                            if matchtdir:
                                matchtdir(nf)
                            wadd(nf)
                        if nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None
                    elif kind == regkind or kind == lnkkind:
                        if nf in dmap:
                            if matchalways or matchfn(nf):
                                results[nf] = st
                        elif (matchalways or matchfn(nf)) and not ignore(
                            nf
                        ):
                            # unknown file -- normalize if necessary
                            if not alreadynormed:
                                nf = normalize(nf, False, True)
                            results[nf] = st
                    elif nf in dmap and (matchalways or matchfn(nf)):
                        results[nf] = None

    for nd, d in work:
        # alreadynormed means that processwork doesn't have to do any
        # expensive directory normalization
        alreadynormed = not normalize or nd == d
        traverse([d], alreadynormed)

    # drop the sentinels installed by _walkexplicit
    for s in subrepos:
        del results[s]
    del results[b'.hg']

    # step 3: visit remaining files from dmap
    if not skipstep3 and not exact:
        # If a dmap file is not in results yet, it was either
        # a) not matching matchfn b) ignored, c) missing, or d) under a
        # symlink directory.
        if not results and matchalways:
            visit = [f for f in dmap]
        else:
            visit = [f for f in dmap if f not in results and matchfn(f)]
        visit.sort()

        if unknown:
            # unknown == True means we walked all dirs under the roots
            # that wasn't ignored, and everything that matched was stat'ed
            # and is already in results.
            # The rest must thus be ignored or under a symlink.
            audit_path = pathutil.pathauditor(self._root, cached=True)

            for nf in iter(visit):
                # If a stat for the same file was already added with a
                # different case, don't add one for this, since that would
                # make it appear as if the file exists under both names
                # on disk.
                if (
                    normalizefile
                    and normalizefile(nf, True, True) in results
                ):
                    results[nf] = None
                # Report ignored items in the dmap as long as they are not
                # under a symlink directory.
                elif audit_path.check(nf):
                    try:
                        results[nf] = lstat(join(nf))
                        # file was just ignored, no links, and exists
                    except OSError:
                        # file doesn't exist
                        results[nf] = None
                else:
                    # It's either missing or under a symlink directory
                    # which we in this case report as missing
                    results[nf] = None
        else:
            # We may not have walked the full directory tree above,
            # so stat and check everything we missed.
            iv = iter(visit)
            for st in util.statfiles([join(i) for i in visit]):
                results[next(iv)] = st
    return results
1356
1354
def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
    """Compute status through the Rust extension (``rustmod.status``).

    Returns a pair ``(lookup, status)`` where ``lookup`` is the list of
    files that need a content comparison and ``status`` is a
    ``scmutil.status`` tuple.  May raise ``rustmod.FallbackError`` from
    the Rust call — NOTE(review): not caught here; the caller is expected
    to handle the fallback.
    """
    # In a sparse checkout, restrict the status to the sparse set plus any
    # explicitly named files (mirrors the pure-Python walk()).
    if self._sparsematchfn is not None:
        em = matchmod.exact(matcher.files())
        sm = matchmod.unionmatcher([self._sparsematcher, em])
        matcher = matchmod.intersectmatchers(matcher, sm)
    # Force Rayon (Rust parallelism library) to respect the number of
    # workers. This is a temporary workaround until Rust code knows
    # how to read the config file.
    numcpus = self._ui.configint(b"worker", b"numcpus")
    if numcpus is not None:
        encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

    workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
    if not workers_enabled:
        encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

    # The unpack order below must match the tuple produced by the Rust
    # extension exactly.
    (
        lookup,
        modified,
        added,
        removed,
        deleted,
        clean,
        ignored,
        unknown,
        warnings,
        bad,
        traversed,
        dirty,
    ) = rustmod.status(
        self._map._map,
        matcher,
        self._rootdir,
        self._ignorefiles(),
        self._checkexec,
        bool(list_clean),
        bool(list_ignored),
        bool(list_unknown),
        bool(matcher.traversedir),
    )

    # The Rust side may have mutated the dirstate (e.g. refreshed mtimes);
    # propagate its dirtiness flag.
    self._dirty |= dirty

    if matcher.traversedir:
        for dir in traversed:
            matcher.traversedir(dir)

    if self._ui.warn:
        for item in warnings:
            if isinstance(item, tuple):
                # (file_path, syntax) pair: an ignore file with bad syntax
                file_path, syntax = item
                msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                    file_path,
                    syntax,
                )
                self._ui.warn(msg)
            else:
                # bare path: an ignore/pattern file that could not be read
                msg = _(b"skipping unreadable pattern file '%s': %s\n")
                self._ui.warn(
                    msg
                    % (
                        pathutil.canonpath(
                            self._rootdir, self._rootdir, item
                        ),
                        b"No such file or directory",
                    )
                )

    for fn, message in bad:
        matcher.bad(fn, encoding.strtolocal(message))

    status = scmutil.status(
        modified=modified,
        added=added,
        removed=removed,
        deleted=deleted,
        unknown=unknown,
        ignored=ignored,
        clean=clean,
    )
    return (lookup, status)
1438
1436
# XXX since this can make the dirstate dirty (through rust), we should
# enforce that it is done within an appropriate change-context that scopes
# the change and ensures it eventually gets written on disk (or rolled back)
def status(self, match, subrepos, ignored, clean, unknown):
    """Determine the status of the working copy relative to the
    dirstate and return a pair of (unsure, status), where status is of type
    scmutil.status and:

    unsure:
      files that might have been modified since the dirstate was
      written, but need to be read to be sure (size is the same
      but mtime differs)
    status.modified:
      files that have definitely been modified since the dirstate
      was written (different size or mode)
    status.clean:
      files that have definitely not been modified since the
      dirstate was written
    """
    # Keep the original boolean flags; the bare names are reused below as
    # result lists.
    listignored, listclean, listunknown = ignored, clean, unknown
    lookup, modified, added, unknown, ignored = [], [], [], [], []
    removed, deleted, clean = [], [], []

    dmap = self._map
    dmap.preload()

    # Decide whether the fast Rust implementation can be used for this
    # query; any unsupported feature forces the pure-Python path.
    use_rust = True

    allowed_matchers = (
        matchmod.alwaysmatcher,
        matchmod.differencematcher,
        matchmod.exactmatcher,
        matchmod.includematcher,
        matchmod.intersectionmatcher,
        matchmod.nevermatcher,
        matchmod.unionmatcher,
    )

    if rustmod is None:
        use_rust = False
    elif self._checkcase:
        # Case-insensitive filesystems are not handled yet
        use_rust = False
    elif subrepos:
        use_rust = False
    elif not isinstance(match, allowed_matchers):
        # Some matchers have yet to be implemented
        use_rust = False

    # Get the time from the filesystem so we can disambiguate files that
    # appear modified in the present or future.
    try:
        mtime_boundary = timestamp.get_fs_now(self._opener)
    except OSError:
        # In largefiles or readonly context
        mtime_boundary = None

    if use_rust:
        try:
            res = self._rust_status(
                match, listclean, listignored, listunknown
            )
            return res + (mtime_boundary,)
        except rustmod.FallbackError:
            # Rust could not handle this query after all; fall through to
            # the pure-Python implementation below.
            pass

    def noop(f):
        # used in place of list.append when a category is not requested
        pass

    # Bind hot lookups/appends to locals for the per-file loop below.
    dcontains = dmap.__contains__
    dget = dmap.__getitem__
    ladd = lookup.append  # aka "unsure"
    madd = modified.append
    aadd = added.append
    uadd = unknown.append if listunknown else noop
    iadd = ignored.append if listignored else noop
    radd = removed.append
    dadd = deleted.append
    cadd = clean.append if listclean else noop
    mexact = match.exact
    dirignore = self._dirignore
    checkexec = self._checkexec
    checklink = self._checklink
    copymap = self._map.copymap

    # We need to do full walks when either
    # - we're listing all clean files, or
    # - match.traversedir does something, because match.traversedir should
    #   be called for every dir in the working dir
    full = listclean or match.traversedir is not None
    for fn, st in self.walk(
        match, subrepos, listunknown, listignored, full=full
    ).items():
        if not dcontains(fn):
            # not tracked by the dirstate: either ignored or unknown
            if (listignored or mexact(fn)) and dirignore(fn):
                if listignored:
                    iadd(fn)
            else:
                uadd(fn)
            continue

        t = dget(fn)
        mode = t.mode
        size = t.size

        # Classify the tracked file; st is None when it is gone from disk.
        if not st and t.tracked:
            dadd(fn)
        elif t.p2_info:
            madd(fn)
        elif t.added:
            aadd(fn)
        elif t.removed:
            radd(fn)
        elif t.tracked:
            if not checklink and t.has_fallback_symlink:
                # If the file system does not support symlink, the mode
                # might not be correctly stored in the dirstate, so do not
                # trust it.
                ladd(fn)
            elif not checkexec and t.has_fallback_exec:
                # If the file system does not support exec bits, the mode
                # might not be correctly stored in the dirstate, so do not
                # trust it.
                ladd(fn)
            elif (
                size >= 0
                and (
                    (size != st.st_size and size != st.st_size & _rangemask)
                    or ((mode ^ st.st_mode) & 0o100 and checkexec)
                )
                or fn in copymap
            ):
                if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                    # issue6456: Size returned may be longer due to
                    # encryption on EXT-4 fscrypt, undecided.
                    ladd(fn)
                else:
                    madd(fn)
            elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                # There might be a change in the future if for example the
                # internal clock is off, but this is a case where the issues
                # the user would face would be a lot worse and there is
                # nothing we can really do.
                ladd(fn)
            elif listclean:
                cadd(fn)
    status = scmutil.status(
        modified, added, removed, deleted, unknown, ignored, clean
    )
    return (lookup, status, mtime_boundary)
1589
1587
def matches(self, match):
    """Return the files known to the dirstate (in whatever state) that
    the matcher *match* selects."""
    dmap = self._map
    if rustmod is not None:
        # The Rust-backed map wraps the real mapping one level deeper.
        dmap = self._map._map

    if match.always():
        return dmap.keys()
    candidates = match.files()
    if match.isexact():
        # Exact matcher: iterate the (typically much smaller) pattern
        # list instead of the whole dirstate map.
        return [f for f in candidates if f in dmap]
    if match.prefix() and all(f in dmap for f in candidates):
        # Prefix matcher whose roots are all plain tracked files:
        # nothing can live below a file, so the roots are the answer.
        return list(candidates)
    return [f for f in dmap if match(f)]
1610
1608
1611 def _actualfilename(self, tr):
1609 def _actualfilename(self, tr):
1612 if tr:
1610 if tr:
1613 return self._pendingfilename
1611 return self._pendingfilename
1614 else:
1612 else:
1615 return self._filename
1613 return self._filename
1616
1614
def all_file_names(self):
    """List every file name currently used by this dirstate.

    This is only used to do `hg rollback` related backup in the
    transaction.
    """
    if not self._opener.exists(self._filename):
        # No data has ever been written to disk yet.
        return ()
    if self._use_dirstate_v2:
        # v2 keeps the bulk of the data in a separate, docket-managed
        # data file next to the main dirstate file.
        return (self._filename, self._map.docket.data_filename())
    return (self._filename,)
1632
1630
def verify(self, m1, m2, p1, narrow_matcher=None):
    """Check the dirstate contents against the parent manifests.

    Yields one formatted error message (bytes) per inconsistency found
    between the dirstate entries and the manifests ``m1``/``m2`` of
    parent ``p1``.  When ``narrow_matcher`` is given, manifest files it
    rejects are ignored.
    """
    missing_from_p1 = _(
        b"%s marked as tracked in p1 (%s) but not in manifest1\n"
    )
    unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
    missing_from_ps = _(
        b"%s marked as modified, but not in either manifest\n"
    )
    missing_from_ds = _(
        b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
    )
    # Pass 1: every dirstate entry must be consistent with the manifests.
    for fname, entry in self.items():
        if entry.p1_tracked:
            if entry.modified and fname not in m1 and fname not in m2:
                yield missing_from_ps % fname
            elif fname not in m1:
                yield missing_from_p1 % (fname, node.short(p1))
        if entry.added and fname in m1:
            yield unexpected_in_p1 % fname
    # Pass 2: every file of manifest1 must be tracked in the dirstate.
    for fname in m1:
        if narrow_matcher is not None and not narrow_matcher(fname):
            continue
        if not self.get_entry(fname).p1_tracked:
            yield missing_from_ds % (fname, node.short(p1))
General Comments 0
You need to be logged in to leave comments. Login now