##// END OF EJS Templates
dirstate: phase-divergent update to 4e95341c89aa...
marmoute -
r51052:fbb4c711 default
parent child Browse files
Show More
@@ -1,1781 +1,1781 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import collections
9 import collections
10 import contextlib
10 import contextlib
11 import os
11 import os
12 import stat
12 import stat
13 import uuid
13 import uuid
14
14
15 from .i18n import _
15 from .i18n import _
16 from .pycompat import delattr
16 from .pycompat import delattr
17
17
18 from hgdemandimport import tracing
18 from hgdemandimport import tracing
19
19
20 from . import (
20 from . import (
21 dirstatemap,
21 dirstatemap,
22 encoding,
22 encoding,
23 error,
23 error,
24 match as matchmod,
24 match as matchmod,
25 node,
25 node,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 util,
30 util,
31 )
31 )
32
32
33 from .dirstateutils import (
33 from .dirstateutils import (
34 timestamp,
34 timestamp,
35 )
35 )
36
36
37 from .interfaces import (
37 from .interfaces import (
38 dirstate as intdirstate,
38 dirstate as intdirstate,
39 util as interfaceutil,
39 util as interfaceutil,
40 )
40 )
41
41
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# True when the Rust extension provides the fast dirstate-v2 implementation
HAS_FAST_DIRSTATE_V2 = rustmod is not None

# short local aliases for frequently used helpers
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = dirstatemap.DirstateItem
53
53
class repocache(filecache):
    """filecache variant whose tracked files live under `.hg/`."""

    def join(self, obj, fname):
        # resolve the file name through the repository opener (.hg/ relative)
        opener = obj._opener
        return opener.join(fname)
class rootcache(filecache):
    """filecache variant whose tracked files live in the repository root."""

    def join(self, obj, fname):
        # resolve the file name relative to the working-directory root
        joiner = obj._join
        return joiner(fname)
def check_invalidated(func):
    """check that the func is called with a non-invalidated dirstate

    The dirstate is in an "invalidated state" after an error occurred during
    its modification and remains so until we exit the top level scope that
    framed such change.
    """

    def wrap(self, *args, **kwargs):
        # fast path: dirstate is healthy, just forward the call
        if not self._invalidated_context:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` after the dirstate was invalidated'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return wrap
def requires_changing_parents(func):
    """Decorator: restrict *func* to an open `changing_parents` context."""

    def wrap(self, *args, **kwargs):
        if self.is_changing_parents:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a changing_parents context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return check_invalidated(wrap)
def requires_changing_files(func):
    """Decorator: restrict *func* to an open `changing_files` context."""

    def wrap(self, *args, **kwargs):
        if self.is_changing_files:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a `changing_files`'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return check_invalidated(wrap)
def requires_changing_any(func):
    """Decorator: restrict *func* to any open `changing_*` context."""

    def wrap(self, *args, **kwargs):
        if self.is_changing_any:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a changing context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return check_invalidated(wrap)
def requires_changing_files_or_status(func):
    """Decorator: allow *func* in a `changing_files` or `running_status`
    context only."""

    def wrap(self, *args, **kwargs):
        if self.is_changing_files or self._running_status > 0:
            return func(self, *args, **kwargs)
        msg = (
            'calling `%s` outside of a changing_files '
            'or running_status context'
        )
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return check_invalidated(wrap)
# the two mutually exclusive kinds of dirstate change (see `dirstate._changing`)
CHANGE_TYPE_PARENTS = "parents"
CHANGE_TYPE_FILES = "files"
@interfaceutil.implementer(intdirstate.idirstate)
class dirstate:
    """Track the state of the working directory (parents, tracked files)."""

    # used by largefile to avoid overwriting the transaction callback
    _tr_key_suffix = b''
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
        use_tracked_hint=False,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._use_tracked_hint = use_tracked_hint
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        # Either build a sparse-matcher or None if sparse is disabled
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True if any internal state may differ from what is on disk
        self._dirty = False
        # True if the set of tracked files may differ from what is on disk
        self._dirty_tracked_set = False
        self._ui = ui
        # backing storage for the `filecache` decorators used on this class
        self._filecache = {}
        # nesting level of `changing_parents`/`changing_files` contexts
        self._changing_level = 0
        # the kind of change currently underway (a CHANGE_TYPE_* value or None)
        self._change_type = None
        # number of open `running_status` contexts
        self._running_status = 0
        # True if the current dirstate changing operations have been
        # invalidated (used to make sure all nested contexts have been exited)
        self._invalidated_context = False
        # True while our pending state is registered with a transaction
        self._attached_to_a_transaction = False
        self._filename = b'dirstate'
        self._filename_th = b'dirstate-tracked-hint'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
198 def refresh(self):
198 def refresh(self):
199 if '_branch' in vars(self):
199 if '_branch' in vars(self):
200 del self._branch
200 del self._branch
201 if '_map' in vars(self) and self._map.may_need_refresh():
201 if '_map' in vars(self) and self._map.may_need_refresh():
202 self.invalidate()
202 self.invalidate()
203
203
204 def prefetch_parents(self):
204 def prefetch_parents(self):
205 """make sure the parents are loaded
205 """make sure the parents are loaded
206
206
207 Used to avoid a race condition.
207 Used to avoid a race condition.
208 """
208 """
209 self._pl
209 self._pl
210
210
    @contextlib.contextmanager
    @check_invalidated
    def running_status(self, repo):
        """Wrap a status operation

        This context is not mutually exclusive with the `changing_*` contexts.
        It also does not require the `wlock` to be taken.

        If the wlock is taken, this context will behave in a simple way, and
        ensure the data are scheduled for write when leaving the top level
        context.

        If the lock is not taken, it will only warrant that the data are either
        committed (written) and rolled back (invalidated) when exiting the top
        level context. The write/invalidate action must be performed by the
        wrapped code.


        The expected logic is:

        A: read the dirstate
        B: run status
           This might make the dirstate dirty by updating cache,
           especially in Rust.
        C: do more "post status" fixup if relevant
        D: try to take the w-lock (this will invalidate the changes if they were raced)
        E0: if dirstate changed on disk β†’ discard change (done by dirstate internal)
        E1: elif lock was acquired β†’ write the changes
        E2: else β†’ discard the changes
        """
        has_lock = repo.currentwlock() is not None
        is_changing = self.is_changing_any
        tr = repo.currenttransaction()
        has_tr = tr is not None
        # True when we are already inside another running_status context
        nested = bool(self._running_status)

        first_and_alone = not (is_changing or has_tr or nested)

        # enforce no change happened outside of a proper context.
        if first_and_alone and self._dirty:
            has_tr = repo.currenttransaction() is not None
            if not has_tr and self._changing_level == 0 and self._dirty:
                msg = "entering a status context, but dirstate is already dirty"
                raise error.ProgrammingError(msg)

        # only the outermost lock-holding context writes on exit (case E1)
        should_write = has_lock and not (nested or is_changing)

        self._running_status += 1
        try:
            yield
        except Exception:
            # any failure inside the wrapped status poisons the dirstate
            self.invalidate()
            raise
        finally:
            self._running_status -= 1
            if self._invalidated_context:
                # an inner scope invalidated us: never write (case E0/E2)
                should_write = False
                self.invalidate()

        if should_write:
            # the transaction must not have changed while we were running
            assert repo.currenttransaction() is tr
            self.write(tr)
        elif not has_lock:
            if self._dirty:
                msg = b'dirstate dirty while exiting an isolated status context'
                repo.ui.develwarn(msg)
                self.invalidate()
    @contextlib.contextmanager
    @check_invalidated
    def _changing(self, repo, change_type):
        """Wrap a dirstate-modifying operation of kind `change_type`.

        Requires the wlock to be held.  Nested contexts of the same kind are
        allowed; mixing kinds is a programming error.  On clean exit of the
        outermost context the dirstate is written (unless a transaction is
        responsible for that); on error everything is invalidated.
        """
        if repo.currentwlock() is None:
            msg = b"trying to change the dirstate without holding the wlock"
            raise error.ProgrammingError(msg)

        has_tr = repo.currenttransaction() is not None
        if not has_tr and self._changing_level == 0 and self._dirty:
            msg = b"entering a changing context, but dirstate is already dirty"
            repo.ui.develwarn(msg)

        assert self._changing_level >= 0
        # different types of change are mutually exclusive
        if self._change_type is None:
            assert self._changing_level == 0
            self._change_type = change_type
        elif self._change_type != change_type:
            msg = (
                'trying to open "%s" dirstate-changing context while a "%s" is'
                ' already open'
            )
            msg %= (change_type, self._change_type)
            raise error.ProgrammingError(msg)
        should_write = False
        self._changing_level += 1
        try:
            yield
        except:  # re-raises
            self.invalidate()  # this will set `_invalidated_context`
            raise
        finally:
            assert self._changing_level > 0
            self._changing_level -= 1
            # If the dirstate is being invalidated, call invalidate again.
            # This will throw away anything added by a upper context and
            # reset the `_invalidated_context` flag when relevant
            if self._changing_level <= 0:
                self._change_type = None
                assert self._changing_level == 0
                if self._invalidated_context:
                    # make sure we invalidate anything an upper context might
                    # have changed.
                    self.invalidate()
                else:
                    should_write = self._changing_level <= 0
                    tr = repo.currenttransaction()
                    if has_tr != (tr is not None):
                        if has_tr:
                            m = "transaction vanished while changing dirstate"
                        else:
                            m = "transaction appeared while changing dirstate"
                        raise error.ProgrammingError(m)
                    if should_write:
                        self.write(tr)
335 @contextlib.contextmanager
335 @contextlib.contextmanager
336 def changing_parents(self, repo):
336 def changing_parents(self, repo):
337 with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
337 with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
338 yield c
338 yield c
339
339
340 @contextlib.contextmanager
340 @contextlib.contextmanager
341 def changing_files(self, repo):
341 def changing_files(self, repo):
342 with self._changing(repo, CHANGE_TYPE_FILES) as c:
342 with self._changing(repo, CHANGE_TYPE_FILES) as c:
343 yield c
343 yield c
344
344
345 # here to help migration to the new code
345 # here to help migration to the new code
346 def parentchange(self):
346 def parentchange(self):
347 msg = (
347 msg = (
348 "Mercurial 6.4 and later requires call to "
348 "Mercurial 6.4 and later requires call to "
349 "`dirstate.changing_parents(repo)`"
349 "`dirstate.changing_parents(repo)`"
350 )
350 )
351 raise error.ProgrammingError(msg)
351 raise error.ProgrammingError(msg)
352
352
353 @property
353 @property
354 def is_changing_any(self):
354 def is_changing_any(self):
355 """Returns true if the dirstate is in the middle of a set of changes.
355 """Returns true if the dirstate is in the middle of a set of changes.
356
356
357 This returns True for any kind of change.
357 This returns True for any kind of change.
358 """
358 """
359 return self._changing_level > 0
359 return self._changing_level > 0
360
360
361 def pendingparentchange(self):
361 def pendingparentchange(self):
362 return self.is_changing_parent()
362 return self.is_changing_parent()
363
363
364 def is_changing_parent(self):
364 def is_changing_parent(self):
365 """Returns true if the dirstate is in the middle of a set of changes
365 """Returns true if the dirstate is in the middle of a set of changes
366 that modify the dirstate parent.
366 that modify the dirstate parent.
367 """
367 """
368 self._ui.deprecwarn(b"dirstate.is_changing_parents", b"6.5")
368 self._ui.deprecwarn(b"dirstate.is_changing_parents", b"6.5")
369 return self.is_changing_parents
369 return self.is_changing_parents
370
370
371 @property
371 @property
372 def is_changing_parents(self):
372 def is_changing_parents(self):
373 """Returns true if the dirstate is in the middle of a set of changes
373 """Returns true if the dirstate is in the middle of a set of changes
374 that modify the dirstate parent.
374 that modify the dirstate parent.
375 """
375 """
376 if self._changing_level <= 0:
376 if self._changing_level <= 0:
377 return False
377 return False
378 return self._change_type == CHANGE_TYPE_PARENTS
378 return self._change_type == CHANGE_TYPE_PARENTS
379
379
380 @property
380 @property
381 def is_changing_files(self):
381 def is_changing_files(self):
382 """Returns true if the dirstate is in the middle of a set of changes
382 """Returns true if the dirstate is in the middle of a set of changes
383 that modify the files tracked or their sources.
383 that modify the files tracked or their sources.
384 """
384 """
385 if self._changing_level <= 0:
385 if self._changing_level <= 0:
386 return False
386 return False
387 return self._change_type == CHANGE_TYPE_FILES
387 return self._change_type == CHANGE_TYPE_FILES
388
388
389 @propertycache
389 @propertycache
390 def _map(self):
390 def _map(self):
391 """Return the dirstate contents (see documentation for dirstatemap)."""
391 """Return the dirstate contents (see documentation for dirstatemap)."""
392 return self._mapcls(
392 return self._mapcls(
393 self._ui,
393 self._ui,
394 self._opener,
394 self._opener,
395 self._root,
395 self._root,
396 self._nodeconstants,
396 self._nodeconstants,
397 self._use_dirstate_v2,
397 self._use_dirstate_v2,
398 )
398 )
399
399
400 @property
400 @property
401 def _sparsematcher(self):
401 def _sparsematcher(self):
402 """The matcher for the sparse checkout.
402 """The matcher for the sparse checkout.
403
403
404 The working directory may not include every file from a manifest. The
404 The working directory may not include every file from a manifest. The
405 matcher obtained by this property will match a path if it is to be
405 matcher obtained by this property will match a path if it is to be
406 included in the working directory.
406 included in the working directory.
407
407
408 When sparse if disabled, return None.
408 When sparse if disabled, return None.
409 """
409 """
410 if self._sparsematchfn is None:
410 if self._sparsematchfn is None:
411 return None
411 return None
412 # TODO there is potential to cache this property. For now, the matcher
412 # TODO there is potential to cache this property. For now, the matcher
413 # is resolved on every access. (But the called function does use a
413 # is resolved on every access. (But the called function does use a
414 # cache to keep the lookup fast.)
414 # cache to keep the lookup fast.)
415 return self._sparsematchfn()
415 return self._sparsematchfn()
416
416
417 @repocache(b'branch')
417 @repocache(b'branch')
418 def _branch(self):
418 def _branch(self):
419 try:
419 try:
420 return self._opener.read(b"branch").strip() or b"default"
420 return self._opener.read(b"branch").strip() or b"default"
421 except FileNotFoundError:
421 except FileNotFoundError:
422 return b"default"
422 return b"default"
423
423
424 @property
424 @property
425 def _pl(self):
425 def _pl(self):
426 return self._map.parents()
426 return self._map.parents()
427
427
428 def hasdir(self, d):
428 def hasdir(self, d):
429 return self._map.hastrackeddir(d)
429 return self._map.hastrackeddir(d)
430
430
431 @rootcache(b'.hgignore')
431 @rootcache(b'.hgignore')
432 def _ignore(self):
432 def _ignore(self):
433 files = self._ignorefiles()
433 files = self._ignorefiles()
434 if not files:
434 if not files:
435 return matchmod.never()
435 return matchmod.never()
436
436
437 pats = [b'include:%s' % f for f in files]
437 pats = [b'include:%s' % f for f in files]
438 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
438 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
439
439
440 @propertycache
440 @propertycache
441 def _slash(self):
441 def _slash(self):
442 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
442 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
443
443
444 @propertycache
444 @propertycache
445 def _checklink(self):
445 def _checklink(self):
446 return util.checklink(self._root)
446 return util.checklink(self._root)
447
447
448 @propertycache
448 @propertycache
449 def _checkexec(self):
449 def _checkexec(self):
450 return bool(util.checkexec(self._root))
450 return bool(util.checkexec(self._root))
451
451
452 @propertycache
452 @propertycache
453 def _checkcase(self):
453 def _checkcase(self):
454 return not util.fscasesensitive(self._join(b'.hg'))
454 return not util.fscasesensitive(self._join(b'.hg'))
455
455
456 def _join(self, f):
456 def _join(self, f):
457 # much faster than os.path.join()
457 # much faster than os.path.join()
458 # it's safe because f is always a relative path
458 # it's safe because f is always a relative path
459 return self._rootdir + f
459 return self._rootdir + f
460
460
    def flagfunc(self, buildfallback):
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.
        """

        # small hack to cache the result of buildfallback()
        fallback_func = []

        def get_flags(x):
            entry = None
            fallback_value = None
            try:
                st = os.lstat(self._join(x))
            except OSError:
                # file is missing or unreadable: no flags
                return b''

            if self._checklink:
                # layer 1: the filesystem supports symlinks, trust lstat
                if util.statislink(st):
                    return b'l'
            else:
                # layer 2: consult the fallback recorded in the dirstate
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                else:
                    # layer 3: compute the expensive fallback lazily, once
                    if not fallback_func:
                        fallback_func.append(buildfallback())
                    fallback_value = fallback_func[0](x)
                    if b'l' in fallback_value:
                        return b'l'

            if self._checkexec:
                # layer 1: the filesystem tracks the exec bit, trust lstat
                if util.statisexec(st):
                    return b'x'
            else:
                # layer 2: consult the fallback recorded in the dirstate
                if entry is None:
                    entry = self.get_entry(x)
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                else:
                    # layer 3: reuse the computed fallback when available
                    if fallback_value is None:
                        if not fallback_func:
                            fallback_func.append(buildfallback())
                        fallback_value = fallback_func[0](x)
                    if b'x' in fallback_value:
                        return b'x'
            return b''

        return get_flags
516 @propertycache
516 @propertycache
517 def _cwd(self):
517 def _cwd(self):
518 # internal config: ui.forcecwd
518 # internal config: ui.forcecwd
519 forcecwd = self._ui.config(b'ui', b'forcecwd')
519 forcecwd = self._ui.config(b'ui', b'forcecwd')
520 if forcecwd:
520 if forcecwd:
521 return forcecwd
521 return forcecwd
522 return encoding.getcwd()
522 return encoding.getcwd()
523
523
524 def getcwd(self):
524 def getcwd(self):
525 """Return the path from which a canonical path is calculated.
525 """Return the path from which a canonical path is calculated.
526
526
527 This path should be used to resolve file patterns or to convert
527 This path should be used to resolve file patterns or to convert
528 canonical paths back to file paths for display. It shouldn't be
528 canonical paths back to file paths for display. It shouldn't be
529 used to get real file paths. Use vfs functions instead.
529 used to get real file paths. Use vfs functions instead.
530 """
530 """
531 cwd = self._cwd
531 cwd = self._cwd
532 if cwd == self._root:
532 if cwd == self._root:
533 return b''
533 return b''
534 # self._root ends with a path separator if self._root is '/' or 'C:\'
534 # self._root ends with a path separator if self._root is '/' or 'C:\'
535 rootsep = self._root
535 rootsep = self._root
536 if not util.endswithsep(rootsep):
536 if not util.endswithsep(rootsep):
537 rootsep += pycompat.ossep
537 rootsep += pycompat.ossep
538 if cwd.startswith(rootsep):
538 if cwd.startswith(rootsep):
539 return cwd[len(rootsep) :]
539 return cwd[len(rootsep) :]
540 else:
540 else:
541 # we're outside the repo. return an absolute path.
541 # we're outside the repo. return an absolute path.
542 return cwd
542 return cwd
543
543
544 def pathto(self, f, cwd=None):
544 def pathto(self, f, cwd=None):
545 if cwd is None:
545 if cwd is None:
546 cwd = self.getcwd()
546 cwd = self.getcwd()
547 path = util.pathto(self._root, cwd, f)
547 path = util.pathto(self._root, cwd, f)
548 if self._slash:
548 if self._slash:
549 return util.pconvert(path)
549 return util.pconvert(path)
550 return path
550 return path
551
551
552 def get_entry(self, path):
552 def get_entry(self, path):
553 """return a DirstateItem for the associated path"""
553 """return a DirstateItem for the associated path"""
554 entry = self._map.get(path)
554 entry = self._map.get(path)
555 if entry is None:
555 if entry is None:
556 return DirstateItem()
556 return DirstateItem()
557 return entry
557 return entry
558
558
559 def __contains__(self, key):
559 def __contains__(self, key):
560 return key in self._map
560 return key in self._map
561
561
562 def __iter__(self):
562 def __iter__(self):
563 return iter(sorted(self._map))
563 return iter(sorted(self._map))
564
564
    def items(self):
        """Iterate over (path, DirstateItem) pairs from the underlying map."""
        return self._map.items()

    # historical alias kept for callers written against the py2-era API
    iteritems = items
569
569
570 def parents(self):
570 def parents(self):
571 return [self._validate(p) for p in self._pl]
571 return [self._validate(p) for p in self._pl]
572
572
573 def p1(self):
573 def p1(self):
574 return self._validate(self._pl[0])
574 return self._validate(self._pl[0])
575
575
576 def p2(self):
576 def p2(self):
577 return self._validate(self._pl[1])
577 return self._validate(self._pl[1])
578
578
579 @property
579 @property
580 def in_merge(self):
580 def in_merge(self):
581 """True if a merge is in progress"""
581 """True if a merge is in progress"""
582 return self._pl[1] != self._nodeconstants.nullid
582 return self._pl[1] != self._nodeconstants.nullid
583
583
584 def branch(self):
584 def branch(self):
585 return encoding.tolocal(self._branch)
585 return encoding.tolocal(self._branch)
586
586
    @requires_changing_parents
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._changing_level == 0:
            # belt-and-suspenders: the decorator should already guarantee a
            # changing-parents context, but keep the explicit runtime check
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.changing_parents context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the original parents so the parent-change callbacks
            # fired at write time can see the old/new pair
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
613
613
614 def setbranch(self, branch):
614 def setbranch(self, branch):
615 self.__class__._branch.set(self, encoding.fromlocal(branch))
615 self.__class__._branch.set(self, encoding.fromlocal(branch))
616 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
616 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
617 try:
617 try:
618 f.write(self._branch + b'\n')
618 f.write(self._branch + b'\n')
619 f.close()
619 f.close()
620
620
621 # make sure filecache has the correct stat info for _branch after
621 # make sure filecache has the correct stat info for _branch after
622 # replacing the underlying file
622 # replacing the underlying file
623 ce = self._filecache[b'_branch']
623 ce = self._filecache[b'_branch']
624 if ce:
624 if ce:
625 ce.refresh()
625 ce.refresh()
626 except: # re-raises
626 except: # re-raises
627 f.discard()
627 f.discard()
628 raise
628 raise
629
629
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so the next access reloads from disk
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._dirty = False
        self._dirty_tracked_set = False
        # remember whether we were invalidated while a change was in flight;
        # write paths assert on this flag to avoid persisting stale content
        self._invalidated_context = bool(
            self._changing_level > 0
            or self._attached_to_a_transaction
            or self._running_status
        )
        self._origpl = None
648
648
649 @requires_changing_any
649 @requires_changing_any
650 def copy(self, source, dest):
650 def copy(self, source, dest):
651 """Mark dest as a copy of source. Unmark dest if source is None."""
651 """Mark dest as a copy of source. Unmark dest if source is None."""
652 if source == dest:
652 if source == dest:
653 return
653 return
654 self._dirty = True
654 self._dirty = True
655 if source is not None:
655 if source is not None:
656 self._check_sparse(source)
656 self._check_sparse(source)
657 self._map.copymap[dest] = source
657 self._map.copymap[dest] = source
658 else:
658 else:
659 self._map.copymap.pop(dest, None)
659 self._map.copymap.pop(dest, None)
660
660
661 def copied(self, file):
661 def copied(self, file):
662 return self._map.copymap.get(file, None)
662 return self._map.copymap.get(file, None)
663
663
664 def copies(self):
664 def copies(self):
665 return self._map.copymap
665 return self._map.copymap
666
666
    @requires_changing_files
    def set_tracked(self, filename, reset_copy=False):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        if reset_copy is set, any existing copy information will be dropped.

        return True the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            # newly tracked file: validate the name and check for clashes
            # with tracked directories / the sparse profile before recording
            self._check_new_tracked_filename(filename)
        # NOTE(review): despite its name, `pre_tracked` appears to be True
        # when the file was *untracked* before this call (see the docstring
        # return contract) — confirm against dirstatemap.set_tracked
        pre_tracked = self._map.set_tracked(filename)
        if reset_copy:
            self._map.copymap.pop(filename, None)
        if pre_tracked:
            # tracked-set changed: the tracked-hint key must be rewritten
            self._dirty_tracked_set = True
        return pre_tracked
688
688
689 @requires_changing_files
689 @requires_changing_files
690 def set_untracked(self, filename):
690 def set_untracked(self, filename):
691 """a "public" method for generic code to mark a file as untracked
691 """a "public" method for generic code to mark a file as untracked
692
692
693 This function is to be called outside of "update/merge" case. For
693 This function is to be called outside of "update/merge" case. For
694 example by a command like `hg remove X`.
694 example by a command like `hg remove X`.
695
695
696 return True the file was previously tracked, False otherwise.
696 return True the file was previously tracked, False otherwise.
697 """
697 """
698 ret = self._map.set_untracked(filename)
698 ret = self._map.set_untracked(filename)
699 if ret:
699 if ret:
700 self._dirty = True
700 self._dirty = True
701 self._dirty_tracked_set = True
701 self._dirty_tracked_set = True
702 return ret
702 return ret
703
703
704 @requires_changing_files_or_status
704 @requires_changing_files_or_status
705 def set_clean(self, filename, parentfiledata):
705 def set_clean(self, filename, parentfiledata):
706 """record that the current state of the file on disk is known to be clean"""
706 """record that the current state of the file on disk is known to be clean"""
707 self._dirty = True
707 self._dirty = True
708 if not self._map[filename].tracked:
708 if not self._map[filename].tracked:
709 self._check_new_tracked_filename(filename)
709 self._check_new_tracked_filename(filename)
710 (mode, size, mtime) = parentfiledata
710 (mode, size, mtime) = parentfiledata
711 self._map.set_clean(filename, mode, size, mtime)
711 self._map.set_clean(filename, mode, size, mtime)
712
712
713 @requires_changing_files_or_status
713 @requires_changing_files_or_status
714 def set_possibly_dirty(self, filename):
714 def set_possibly_dirty(self, filename):
715 """record that the current state of the file on disk is unknown"""
715 """record that the current state of the file on disk is unknown"""
716 self._dirty = True
716 self._dirty = True
717 self._map.set_possibly_dirty(filename)
717 self._map.set_possibly_dirty(filename)
718
718
719 @requires_changing_parents
719 @requires_changing_parents
720 def update_file_p1(
720 def update_file_p1(
721 self,
721 self,
722 filename,
722 filename,
723 p1_tracked,
723 p1_tracked,
724 ):
724 ):
725 """Set a file as tracked in the parent (or not)
725 """Set a file as tracked in the parent (or not)
726
726
727 This is to be called when adjust the dirstate to a new parent after an history
727 This is to be called when adjust the dirstate to a new parent after an history
728 rewriting operation.
728 rewriting operation.
729
729
730 It should not be called during a merge (p2 != nullid) and only within
730 It should not be called during a merge (p2 != nullid) and only within
731 a `with dirstate.changing_parents(repo):` context.
731 a `with dirstate.changing_parents(repo):` context.
732 """
732 """
733 if self.in_merge:
733 if self.in_merge:
734 msg = b'update_file_reference should not be called when merging'
734 msg = b'update_file_reference should not be called when merging'
735 raise error.ProgrammingError(msg)
735 raise error.ProgrammingError(msg)
736 entry = self._map.get(filename)
736 entry = self._map.get(filename)
737 if entry is None:
737 if entry is None:
738 wc_tracked = False
738 wc_tracked = False
739 else:
739 else:
740 wc_tracked = entry.tracked
740 wc_tracked = entry.tracked
741 if not (p1_tracked or wc_tracked):
741 if not (p1_tracked or wc_tracked):
742 # the file is no longer relevant to anyone
742 # the file is no longer relevant to anyone
743 if self._map.get(filename) is not None:
743 if self._map.get(filename) is not None:
744 self._map.reset_state(filename)
744 self._map.reset_state(filename)
745 self._dirty = True
745 self._dirty = True
746 elif (not p1_tracked) and wc_tracked:
746 elif (not p1_tracked) and wc_tracked:
747 if entry is not None and entry.added:
747 if entry is not None and entry.added:
748 return # avoid dropping copy information (maybe?)
748 return # avoid dropping copy information (maybe?)
749
749
750 self._map.reset_state(
750 self._map.reset_state(
751 filename,
751 filename,
752 wc_tracked,
752 wc_tracked,
753 p1_tracked,
753 p1_tracked,
754 # the underlying reference might have changed, we will have to
754 # the underlying reference might have changed, we will have to
755 # check it.
755 # check it.
756 has_meaningful_mtime=False,
756 has_meaningful_mtime=False,
757 )
757 )
758
758
759 @requires_changing_parents
759 @requires_changing_parents
760 def update_file(
760 def update_file(
761 self,
761 self,
762 filename,
762 filename,
763 wc_tracked,
763 wc_tracked,
764 p1_tracked,
764 p1_tracked,
765 p2_info=False,
765 p2_info=False,
766 possibly_dirty=False,
766 possibly_dirty=False,
767 parentfiledata=None,
767 parentfiledata=None,
768 ):
768 ):
769 """update the information about a file in the dirstate
769 """update the information about a file in the dirstate
770
770
771 This is to be called when the direstates parent changes to keep track
771 This is to be called when the direstates parent changes to keep track
772 of what is the file situation in regards to the working copy and its parent.
772 of what is the file situation in regards to the working copy and its parent.
773
773
774 This function must be called within a `dirstate.changing_parents` context.
774 This function must be called within a `dirstate.changing_parents` context.
775
775
776 note: the API is at an early stage and we might need to adjust it
776 note: the API is at an early stage and we might need to adjust it
777 depending of what information ends up being relevant and useful to
777 depending of what information ends up being relevant and useful to
778 other processing.
778 other processing.
779 """
779 """
780 self._update_file(
780 self._update_file(
781 filename=filename,
781 filename=filename,
782 wc_tracked=wc_tracked,
782 wc_tracked=wc_tracked,
783 p1_tracked=p1_tracked,
783 p1_tracked=p1_tracked,
784 p2_info=p2_info,
784 p2_info=p2_info,
785 possibly_dirty=possibly_dirty,
785 possibly_dirty=possibly_dirty,
786 parentfiledata=parentfiledata,
786 parentfiledata=parentfiledata,
787 )
787 )
788
788
    def hacky_extension_update_file(self, *args, **kwargs):
        """NEVER USE THIS, YOU DO NOT NEED IT

        This function is a variant of "update_file" to be called by a small set
        of extensions, it also adjust the internal state of file, but can be
        called outside an `changing_parents` context.

        A very small number of extension meddle with the working copy content
        in a way that requires to adjust the dirstate accordingly. At the time
        this command is written they are :
        - keyword,
        - largefile,
        PLEASE DO NOT GROW THIS LIST ANY FURTHER.

        This function could probably be replaced by more semantic one (like
        "adjust expected size" or "always revalidate file content", etc)
        however at the time where this is written, this is too much of a detour
        to be considered.
        """
        # unlike update_file, any changing-files or running-status context is
        # accepted, not just changing_parents
        if not (self._changing_level > 0 or self._running_status > 0):
            msg = "requires a changes context"
            raise error.ProgrammingError(msg)
        self._update_file(
            *args,
            **kwargs,
        )
815
815
    def _update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """Shared implementation behind update_file and
        hacky_extension_update_file: record the new state of *filename* in
        the dirstate map and maintain the dirty flags."""

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        old_entry = self._map.get(filename)
        if old_entry is None:
            prev_tracked = False
        else:
            prev_tracked = old_entry.tracked
        if prev_tracked != wc_tracked:
            # tracked-set changed: the tracked-hint key must be rewritten too
            self._dirty_tracked_set = True

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
847
847
    def _check_new_tracked_filename(self, filename):
        """Validate that *filename* may become tracked.

        Aborts when the name is invalid, clashes with a tracked directory,
        is shadowed by a non-removed file entry on one of its parent
        directories, or falls outside the sparse checkout.
        """
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                # a tracked directory at this level implies no file entry of
                # the same name; no need to look further up the path
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
        self._check_sparse(filename)
864
864
865 def _check_sparse(self, filename):
865 def _check_sparse(self, filename):
866 """Check that a filename is inside the sparse profile"""
866 """Check that a filename is inside the sparse profile"""
867 sparsematch = self._sparsematcher
867 sparsematch = self._sparsematcher
868 if sparsematch is not None and not sparsematch.always():
868 if sparsematch is not None and not sparsematch.always():
869 if not sparsematch(filename):
869 if not sparsematch(filename):
870 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
870 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
871 hint = _(
871 hint = _(
872 b'include file with `hg debugsparse --include <pattern>` or use '
872 b'include file with `hg debugsparse --include <pattern>` or use '
873 b'`hg add -s <file>` to include file directory while adding'
873 b'`hg add -s <file>` to include file directory while adding'
874 )
874 )
875 raise error.Abort(msg % filename, hint=hint)
875 raise error.Abort(msg % filename, hint=hint)
876
876
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Determine the canonical on-disk case of *path*.

        ``normed`` is the case-normalized form of ``path``; discovered
        foldings of existing paths are cached in ``storemap`` (either the
        file or the directory fold map).
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist on disk
            storemap[normed] = folded

        return folded
902
902
903 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
903 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
904 normed = util.normcase(path)
904 normed = util.normcase(path)
905 folded = self._map.filefoldmap.get(normed, None)
905 folded = self._map.filefoldmap.get(normed, None)
906 if folded is None:
906 if folded is None:
907 if isknown:
907 if isknown:
908 folded = path
908 folded = path
909 else:
909 else:
910 folded = self._discoverpath(
910 folded = self._discoverpath(
911 path, normed, ignoremissing, exists, self._map.filefoldmap
911 path, normed, ignoremissing, exists, self._map.filefoldmap
912 )
912 )
913 return folded
913 return folded
914
914
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Normalize the case of *path*, checking files first, then
        directories; discovered results are cached in dirfoldmap."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
930
930
931 def normalize(self, path, isknown=False, ignoremissing=False):
931 def normalize(self, path, isknown=False, ignoremissing=False):
932 """
932 """
933 normalize the case of a pathname when on a casefolding filesystem
933 normalize the case of a pathname when on a casefolding filesystem
934
934
935 isknown specifies whether the filename came from walking the
935 isknown specifies whether the filename came from walking the
936 disk, to avoid extra filesystem access.
936 disk, to avoid extra filesystem access.
937
937
938 If ignoremissing is True, missing path are returned
938 If ignoremissing is True, missing path are returned
939 unchanged. Otherwise, we try harder to normalize possibly
939 unchanged. Otherwise, we try harder to normalize possibly
940 existing path components.
940 existing path components.
941
941
942 The normalized case is determined based on the following precedence:
942 The normalized case is determined based on the following precedence:
943
943
944 - version of name already stored in the dirstate
944 - version of name already stored in the dirstate
945 - version of name stored on disk
945 - version of name stored on disk
946 - version provided via command arguments
946 - version provided via command arguments
947 """
947 """
948
948
949 if self._checkcase:
949 if self._checkcase:
950 return self._normalize(path, isknown, ignoremissing)
950 return self._normalize(path, isknown, ignoremissing)
951 return path
951 return path
952
952
    # XXX this method is barely used, as a result:
    # - its semantic is unclear
    # - do we really need it?
    @requires_changing_parents
    def clear(self):
        """Drop every entry from the dirstate map and mark it dirty."""
        self._map.clear()
        self._dirty = True
960
960
    @requires_changing_parents
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild the dirstate against *parent*.

        ``allfiles`` is the complete manifest of the target revision; when
        ``changedfiles`` is given, only those entries are refreshed (files
        absent from ``allfiles`` are dropped), otherwise the whole dirstate
        is reconstructed.
        """
        matcher = self._sparsematcher
        if matcher is not None and not matcher.always():
            # should not add non-matching files
            allfiles = [f for f in allfiles if matcher(f)]
            if changedfiles:
                changedfiles = [f for f in changedfiles if matcher(f)]

            if changedfiles is not None:
                # these files will be deleted from the dirstate when they are
                # not found to be in allfiles
                dirstatefilestoremove = {f for f in self if not matcher(f)}
                changedfiles = dirstatefilestoremove.union(changedfiles)

        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            self.clear()
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            # remember original parents for write-time callbacks
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
1013
1013
1014 def identity(self):
1014 def identity(self):
1015 """Return identity of dirstate itself to detect changing in storage
1015 """Return identity of dirstate itself to detect changing in storage
1016
1016
1017 If identity of previous dirstate is equal to this, writing
1017 If identity of previous dirstate is equal to this, writing
1018 changes based on the former dirstate out can keep consistency.
1018 changes based on the former dirstate out can keep consistency.
1019 """
1019 """
1020 return self._map.identity
1020 return self._map.identity
1021
1021
    def write(self, tr):
        """Persist pending dirstate changes.

        With a transaction *tr* the writes are delayed through file
        generators tied to the transaction lifetime; without one the
        dirstate (and the tracked-hint key, when enabled) is written out
        immediately.
        """
        if not self._dirty:
            return
        # make sure we don't request a write of invalidated content
        # XXX move before the dirty check once `unlock` stop calling `write`
        assert not self._invalidated_context

        # the tracked-hint key only needs rewriting when the set of tracked
        # files actually changed
        write_key = self._use_tracked_hint and self._dirty_tracked_set
        if tr:

            def on_abort(tr):
                self._attached_to_a_transaction = False
                self.invalidate()

            # make sure we invalidate the current change on abort
            if tr is not None:
                # NOTE(review): redundant — `tr` is known truthy here
                tr.addabort(
                    b'dirstate-invalidate%s' % self._tr_key_suffix,
                    on_abort,
                )

            self._attached_to_a_transaction = True

            def on_success(f):
                self._attached_to_a_transaction = False
                # NOTE(review): the trailing comma makes this a discarded
                # 1-tuple expression; harmless but likely unintended
                self._writedirstate(tr, f),

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate-1-main%s' % self._tr_key_suffix,
                (self._filename,),
                on_success,
                location=b'plain',
                post_finalize=True,
            )
            if write_key:
                tr.addfilegenerator(
                    b'dirstate-2-key-post%s' % self._tr_key_suffix,
                    (self._filename_th,),
                    lambda f: self._write_tracked_hint(tr, f),
                    location=b'plain',
                    post_finalize=True,
                )
            return

        file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
        with file(self._filename) as f:
            self._writedirstate(tr, f)
        if write_key:
            # we update the key-file after writing to make sure readers have
            # a key that matches the newly written content
            with file(self._filename_th) as f:
                self._write_tracked_hint(tr, f)
1075
1075
1076 def delete_tracked_hint(self):
1076 def delete_tracked_hint(self):
1077 """remove the tracked_hint file
1077 """remove the tracked_hint file
1078
1078
1079 To be used by format downgrades operation"""
1079 To be used by format downgrades operation"""
1080 self._opener.unlink(self._filename_th)
1080 self._opener.unlink(self._filename_th)
1081 self._use_tracked_hint = False
1081 self._use_tracked_hint = False
1082
1082
1083 def addparentchangecallback(self, category, callback):
1083 def addparentchangecallback(self, category, callback):
1084 """add a callback to be called when the wd parents are changed
1084 """add a callback to be called when the wd parents are changed
1085
1085
1086 Callback will be called with the following arguments:
1086 Callback will be called with the following arguments:
1087 dirstate, (oldp1, oldp2), (newp1, newp2)
1087 dirstate, (oldp1, oldp2), (newp1, newp2)
1088
1088
1089 Category is a unique identifier to allow overwriting an old callback
1089 Category is a unique identifier to allow overwriting an old callback
1090 with a newer callback.
1090 with a newer callback.
1091 """
1091 """
1092 self._plchangecallbacks[category] = callback
1092 self._plchangecallbacks[category] = callback
1093
1093
1094 def _writedirstate(self, tr, st):
1094 def _writedirstate(self, tr, st):
1095 # make sure we don't write invalidated content
1095 # make sure we don't write invalidated content
1096 assert not self._invalidated_context
1096 assert not self._invalidated_context
1097 # notify callbacks about parents change
1097 # notify callbacks about parents change
1098 if self._origpl is not None and self._origpl != self._pl:
1098 if self._origpl is not None and self._origpl != self._pl:
1099 for c, callback in sorted(self._plchangecallbacks.items()):
1099 for c, callback in sorted(self._plchangecallbacks.items()):
1100 callback(self, self._origpl, self._pl)
1100 callback(self, self._origpl, self._pl)
1101 self._origpl = None
1101 self._origpl = None
1102 self._map.write(tr, st)
1102 self._map.write(tr, st)
1103 self._dirty = False
1103 self._dirty = False
1104 self._dirty_tracked_set = False
1104 self._dirty_tracked_set = False
1105
1105
1106 def _write_tracked_hint(self, tr, f):
1106 def _write_tracked_hint(self, tr, f):
1107 key = node.hex(uuid.uuid4().bytes)
1107 key = node.hex(uuid.uuid4().bytes)
1108 f.write(b"1\n%s\n" % key) # 1 is the format version
1108 f.write(b"1\n%s\n" % key) # 1 is the format version
1109
1109
1110 def _dirignore(self, f):
1110 def _dirignore(self, f):
1111 if self._ignore(f):
1111 if self._ignore(f):
1112 return True
1112 return True
1113 for p in pathutil.finddirs(f):
1113 for p in pathutil.finddirs(f):
1114 if self._ignore(p):
1114 if self._ignore(p):
1115 return True
1115 return True
1116 return False
1116 return False
1117
1117
1118 def _ignorefiles(self):
1118 def _ignorefiles(self):
1119 files = []
1119 files = []
1120 if os.path.exists(self._join(b'.hgignore')):
1120 if os.path.exists(self._join(b'.hgignore')):
1121 files.append(self._join(b'.hgignore'))
1121 files.append(self._join(b'.hgignore'))
1122 for name, path in self._ui.configitems(b"ui"):
1122 for name, path in self._ui.configitems(b"ui"):
1123 if name == b'ignore' or name.startswith(b'ignore.'):
1123 if name == b'ignore' or name.startswith(b'ignore.'):
1124 # we need to use os.path.join here rather than self._join
1124 # we need to use os.path.join here rather than self._join
1125 # because path is arbitrary and user-specified
1125 # because path is arbitrary and user-specified
1126 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1126 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1127 return files
1127 return files
1128
1128
1129 def _ignorefileandline(self, f):
1129 def _ignorefileandline(self, f):
1130 files = collections.deque(self._ignorefiles())
1130 files = collections.deque(self._ignorefiles())
1131 visited = set()
1131 visited = set()
1132 while files:
1132 while files:
1133 i = files.popleft()
1133 i = files.popleft()
1134 patterns = matchmod.readpatternfile(
1134 patterns = matchmod.readpatternfile(
1135 i, self._ui.warn, sourceinfo=True
1135 i, self._ui.warn, sourceinfo=True
1136 )
1136 )
1137 for pattern, lineno, line in patterns:
1137 for pattern, lineno, line in patterns:
1138 kind, p = matchmod._patsplit(pattern, b'glob')
1138 kind, p = matchmod._patsplit(pattern, b'glob')
1139 if kind == b"subinclude":
1139 if kind == b"subinclude":
1140 if p not in visited:
1140 if p not in visited:
1141 files.append(p)
1141 files.append(p)
1142 continue
1142 continue
1143 m = matchmod.match(
1143 m = matchmod.match(
1144 self._root, b'', [], [pattern], warn=self._ui.warn
1144 self._root, b'', [], [pattern], warn=self._ui.warn
1145 )
1145 )
1146 if m(f):
1146 if m(f):
1147 return (i, lineno, line)
1147 return (i, lineno, line)
1148 visited.add(i)
1148 visited.add(i)
1149 return (None, -1, b"")
1149 return (None, -1, b"")
1150
1150
1151 def _walkexplicit(self, match, subrepos):
1151 def _walkexplicit(self, match, subrepos):
1152 """Get stat data about the files explicitly specified by match.
1152 """Get stat data about the files explicitly specified by match.
1153
1153
1154 Return a triple (results, dirsfound, dirsnotfound).
1154 Return a triple (results, dirsfound, dirsnotfound).
1155 - results is a mapping from filename to stat result. It also contains
1155 - results is a mapping from filename to stat result. It also contains
1156 listings mapping subrepos and .hg to None.
1156 listings mapping subrepos and .hg to None.
1157 - dirsfound is a list of files found to be directories.
1157 - dirsfound is a list of files found to be directories.
1158 - dirsnotfound is a list of files that the dirstate thinks are
1158 - dirsnotfound is a list of files that the dirstate thinks are
1159 directories and that were not found."""
1159 directories and that were not found."""
1160
1160
1161 def badtype(mode):
1161 def badtype(mode):
1162 kind = _(b'unknown')
1162 kind = _(b'unknown')
1163 if stat.S_ISCHR(mode):
1163 if stat.S_ISCHR(mode):
1164 kind = _(b'character device')
1164 kind = _(b'character device')
1165 elif stat.S_ISBLK(mode):
1165 elif stat.S_ISBLK(mode):
1166 kind = _(b'block device')
1166 kind = _(b'block device')
1167 elif stat.S_ISFIFO(mode):
1167 elif stat.S_ISFIFO(mode):
1168 kind = _(b'fifo')
1168 kind = _(b'fifo')
1169 elif stat.S_ISSOCK(mode):
1169 elif stat.S_ISSOCK(mode):
1170 kind = _(b'socket')
1170 kind = _(b'socket')
1171 elif stat.S_ISDIR(mode):
1171 elif stat.S_ISDIR(mode):
1172 kind = _(b'directory')
1172 kind = _(b'directory')
1173 return _(b'unsupported file type (type is %s)') % kind
1173 return _(b'unsupported file type (type is %s)') % kind
1174
1174
1175 badfn = match.bad
1175 badfn = match.bad
1176 dmap = self._map
1176 dmap = self._map
1177 lstat = os.lstat
1177 lstat = os.lstat
1178 getkind = stat.S_IFMT
1178 getkind = stat.S_IFMT
1179 dirkind = stat.S_IFDIR
1179 dirkind = stat.S_IFDIR
1180 regkind = stat.S_IFREG
1180 regkind = stat.S_IFREG
1181 lnkkind = stat.S_IFLNK
1181 lnkkind = stat.S_IFLNK
1182 join = self._join
1182 join = self._join
1183 dirsfound = []
1183 dirsfound = []
1184 foundadd = dirsfound.append
1184 foundadd = dirsfound.append
1185 dirsnotfound = []
1185 dirsnotfound = []
1186 notfoundadd = dirsnotfound.append
1186 notfoundadd = dirsnotfound.append
1187
1187
1188 if not match.isexact() and self._checkcase:
1188 if not match.isexact() and self._checkcase:
1189 normalize = self._normalize
1189 normalize = self._normalize
1190 else:
1190 else:
1191 normalize = None
1191 normalize = None
1192
1192
1193 files = sorted(match.files())
1193 files = sorted(match.files())
1194 subrepos.sort()
1194 subrepos.sort()
1195 i, j = 0, 0
1195 i, j = 0, 0
1196 while i < len(files) and j < len(subrepos):
1196 while i < len(files) and j < len(subrepos):
1197 subpath = subrepos[j] + b"/"
1197 subpath = subrepos[j] + b"/"
1198 if files[i] < subpath:
1198 if files[i] < subpath:
1199 i += 1
1199 i += 1
1200 continue
1200 continue
1201 while i < len(files) and files[i].startswith(subpath):
1201 while i < len(files) and files[i].startswith(subpath):
1202 del files[i]
1202 del files[i]
1203 j += 1
1203 j += 1
1204
1204
1205 if not files or b'' in files:
1205 if not files or b'' in files:
1206 files = [b'']
1206 files = [b'']
1207 # constructing the foldmap is expensive, so don't do it for the
1207 # constructing the foldmap is expensive, so don't do it for the
1208 # common case where files is ['']
1208 # common case where files is ['']
1209 normalize = None
1209 normalize = None
1210 results = dict.fromkeys(subrepos)
1210 results = dict.fromkeys(subrepos)
1211 results[b'.hg'] = None
1211 results[b'.hg'] = None
1212
1212
1213 for ff in files:
1213 for ff in files:
1214 if normalize:
1214 if normalize:
1215 nf = normalize(ff, False, True)
1215 nf = normalize(ff, False, True)
1216 else:
1216 else:
1217 nf = ff
1217 nf = ff
1218 if nf in results:
1218 if nf in results:
1219 continue
1219 continue
1220
1220
1221 try:
1221 try:
1222 st = lstat(join(nf))
1222 st = lstat(join(nf))
1223 kind = getkind(st.st_mode)
1223 kind = getkind(st.st_mode)
1224 if kind == dirkind:
1224 if kind == dirkind:
1225 if nf in dmap:
1225 if nf in dmap:
1226 # file replaced by dir on disk but still in dirstate
1226 # file replaced by dir on disk but still in dirstate
1227 results[nf] = None
1227 results[nf] = None
1228 foundadd((nf, ff))
1228 foundadd((nf, ff))
1229 elif kind == regkind or kind == lnkkind:
1229 elif kind == regkind or kind == lnkkind:
1230 results[nf] = st
1230 results[nf] = st
1231 else:
1231 else:
1232 badfn(ff, badtype(kind))
1232 badfn(ff, badtype(kind))
1233 if nf in dmap:
1233 if nf in dmap:
1234 results[nf] = None
1234 results[nf] = None
1235 except (OSError) as inst:
1235 except (OSError) as inst:
1236 # nf not found on disk - it is dirstate only
1236 # nf not found on disk - it is dirstate only
1237 if nf in dmap: # does it exactly match a missing file?
1237 if nf in dmap: # does it exactly match a missing file?
1238 results[nf] = None
1238 results[nf] = None
1239 else: # does it match a missing directory?
1239 else: # does it match a missing directory?
1240 if self._map.hasdir(nf):
1240 if self._map.hasdir(nf):
1241 notfoundadd(nf)
1241 notfoundadd(nf)
1242 else:
1242 else:
1243 badfn(ff, encoding.strtolocal(inst.strerror))
1243 badfn(ff, encoding.strtolocal(inst.strerror))
1244
1244
1245 # match.files() may contain explicitly-specified paths that shouldn't
1245 # match.files() may contain explicitly-specified paths that shouldn't
1246 # be taken; drop them from the list of files found. dirsfound/notfound
1246 # be taken; drop them from the list of files found. dirsfound/notfound
1247 # aren't filtered here because they will be tested later.
1247 # aren't filtered here because they will be tested later.
1248 if match.anypats():
1248 if match.anypats():
1249 for f in list(results):
1249 for f in list(results):
1250 if f == b'.hg' or f in subrepos:
1250 if f == b'.hg' or f in subrepos:
1251 # keep sentinel to disable further out-of-repo walks
1251 # keep sentinel to disable further out-of-repo walks
1252 continue
1252 continue
1253 if not match(f):
1253 if not match(f):
1254 del results[f]
1254 del results[f]
1255
1255
1256 # Case insensitive filesystems cannot rely on lstat() failing to detect
1256 # Case insensitive filesystems cannot rely on lstat() failing to detect
1257 # a case-only rename. Prune the stat object for any file that does not
1257 # a case-only rename. Prune the stat object for any file that does not
1258 # match the case in the filesystem, if there are multiple files that
1258 # match the case in the filesystem, if there are multiple files that
1259 # normalize to the same path.
1259 # normalize to the same path.
1260 if match.isexact() and self._checkcase:
1260 if match.isexact() and self._checkcase:
1261 normed = {}
1261 normed = {}
1262
1262
1263 for f, st in results.items():
1263 for f, st in results.items():
1264 if st is None:
1264 if st is None:
1265 continue
1265 continue
1266
1266
1267 nc = util.normcase(f)
1267 nc = util.normcase(f)
1268 paths = normed.get(nc)
1268 paths = normed.get(nc)
1269
1269
1270 if paths is None:
1270 if paths is None:
1271 paths = set()
1271 paths = set()
1272 normed[nc] = paths
1272 normed[nc] = paths
1273
1273
1274 paths.add(f)
1274 paths.add(f)
1275
1275
1276 for norm, paths in normed.items():
1276 for norm, paths in normed.items():
1277 if len(paths) > 1:
1277 if len(paths) > 1:
1278 for path in paths:
1278 for path in paths:
1279 folded = self._discoverpath(
1279 folded = self._discoverpath(
1280 path, norm, True, None, self._map.dirfoldmap
1280 path, norm, True, None, self._map.dirfoldmap
1281 )
1281 )
1282 if path != folded:
1282 if path != folded:
1283 results[path] = None
1283 results[path] = None
1284
1284
1285 return results, dirsfound, dirsnotfound
1285 return results, dirsfound, dirsnotfound
1286
1286
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Select the ignore predicates according to what the caller wants
        # listed: listing ignored files disables ignoring entirely, while
        # listing neither ignored nor unknown lets us ignore everything and
        # skip directory recursion (step 2) altogether.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # In a sparse checkout, narrow the matcher to the sparse profile
        # plus the explicitly requested files.
        if self._sparsematchfn is not None:
            em = matchmod.exact(match.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            match = matchmod.intersectmatchers(match, sm)

        # Hoist frequently used attributes into locals for the hot loops.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # Iterative depth-first traversal; `work` acts as the stack.
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except (PermissionError, FileNotFoundError) as inst:
                    match.bad(
                        self.pathto(nd), encoding.strtolocal(inst.strerror)
                    )
                    continue
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # Remove the sentinels added by _walkexplicit before returning.
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1477
1477
1478 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1478 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1479 if self._sparsematchfn is not None:
1479 if self._sparsematchfn is not None:
1480 em = matchmod.exact(matcher.files())
1480 em = matchmod.exact(matcher.files())
1481 sm = matchmod.unionmatcher([self._sparsematcher, em])
1481 sm = matchmod.unionmatcher([self._sparsematcher, em])
1482 matcher = matchmod.intersectmatchers(matcher, sm)
1482 matcher = matchmod.intersectmatchers(matcher, sm)
1483 # Force Rayon (Rust parallelism library) to respect the number of
1483 # Force Rayon (Rust parallelism library) to respect the number of
1484 # workers. This is a temporary workaround until Rust code knows
1484 # workers. This is a temporary workaround until Rust code knows
1485 # how to read the config file.
1485 # how to read the config file.
1486 numcpus = self._ui.configint(b"worker", b"numcpus")
1486 numcpus = self._ui.configint(b"worker", b"numcpus")
1487 if numcpus is not None:
1487 if numcpus is not None:
1488 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1488 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1489
1489
1490 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1490 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1491 if not workers_enabled:
1491 if not workers_enabled:
1492 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1492 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1493
1493
1494 (
1494 (
1495 lookup,
1495 lookup,
1496 modified,
1496 modified,
1497 added,
1497 added,
1498 removed,
1498 removed,
1499 deleted,
1499 deleted,
1500 clean,
1500 clean,
1501 ignored,
1501 ignored,
1502 unknown,
1502 unknown,
1503 warnings,
1503 warnings,
1504 bad,
1504 bad,
1505 traversed,
1505 traversed,
1506 dirty,
1506 dirty,
1507 ) = rustmod.status(
1507 ) = rustmod.status(
1508 self._map._map,
1508 self._map._map,
1509 matcher,
1509 matcher,
1510 self._rootdir,
1510 self._rootdir,
1511 self._ignorefiles(),
1511 self._ignorefiles(),
1512 self._checkexec,
1512 self._checkexec,
1513 bool(list_clean),
1513 bool(list_clean),
1514 bool(list_ignored),
1514 bool(list_ignored),
1515 bool(list_unknown),
1515 bool(list_unknown),
1516 bool(matcher.traversedir),
1516 bool(matcher.traversedir),
1517 )
1517 )
1518
1518
1519 self._dirty |= dirty
1519 self._dirty |= dirty
1520
1520
1521 if matcher.traversedir:
1521 if matcher.traversedir:
1522 for dir in traversed:
1522 for dir in traversed:
1523 matcher.traversedir(dir)
1523 matcher.traversedir(dir)
1524
1524
1525 if self._ui.warn:
1525 if self._ui.warn:
1526 for item in warnings:
1526 for item in warnings:
1527 if isinstance(item, tuple):
1527 if isinstance(item, tuple):
1528 file_path, syntax = item
1528 file_path, syntax = item
1529 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1529 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1530 file_path,
1530 file_path,
1531 syntax,
1531 syntax,
1532 )
1532 )
1533 self._ui.warn(msg)
1533 self._ui.warn(msg)
1534 else:
1534 else:
1535 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1535 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1536 self._ui.warn(
1536 self._ui.warn(
1537 msg
1537 msg
1538 % (
1538 % (
1539 pathutil.canonpath(
1539 pathutil.canonpath(
1540 self._rootdir, self._rootdir, item
1540 self._rootdir, self._rootdir, item
1541 ),
1541 ),
1542 b"No such file or directory",
1542 b"No such file or directory",
1543 )
1543 )
1544 )
1544 )
1545
1545
1546 for fn, message in bad:
1546 for fn, message in bad:
1547 matcher.bad(fn, encoding.strtolocal(message))
1547 matcher.bad(fn, encoding.strtolocal(message))
1548
1548
1549 status = scmutil.status(
1549 status = scmutil.status(
1550 modified=modified,
1550 modified=modified,
1551 added=added,
1551 added=added,
1552 removed=removed,
1552 removed=removed,
1553 deleted=deleted,
1553 deleted=deleted,
1554 unknown=unknown,
1554 unknown=unknown,
1555 ignored=ignored,
1555 ignored=ignored,
1556 clean=clean,
1556 clean=clean,
1557 )
1557 )
1558 return (lookup, status)
1558 return (lookup, status)
1559
1559
1560 def status(self, match, subrepos, ignored, clean, unknown):
1560 def status(self, match, subrepos, ignored, clean, unknown):
1561 """Determine the status of the working copy relative to the
1561 """Determine the status of the working copy relative to the
1562 dirstate and return a pair of (unsure, status), where status is of type
1562 dirstate and return a pair of (unsure, status), where status is of type
1563 scmutil.status and:
1563 scmutil.status and:
1564
1564
1565 unsure:
1565 unsure:
1566 files that might have been modified since the dirstate was
1566 files that might have been modified since the dirstate was
1567 written, but need to be read to be sure (size is the same
1567 written, but need to be read to be sure (size is the same
1568 but mtime differs)
1568 but mtime differs)
1569 status.modified:
1569 status.modified:
1570 files that have definitely been modified since the dirstate
1570 files that have definitely been modified since the dirstate
1571 was written (different size or mode)
1571 was written (different size or mode)
1572 status.clean:
1572 status.clean:
1573 files that have definitely not been modified since the
1573 files that have definitely not been modified since the
1574 dirstate was written
1574 dirstate was written
1575 """
1575 """
1576 if not self._running_status:
1576 if not self._running_status:
1577 msg = "Calling `status` outside a `running_status` context"
1577 msg = "Calling `status` outside a `running_status` context"
1578 raise error.ProgrammingError(msg)
1578 raise error.ProgrammingError(msg)
1579 listignored, listclean, listunknown = ignored, clean, unknown
1579 listignored, listclean, listunknown = ignored, clean, unknown
1580 lookup, modified, added, unknown, ignored = [], [], [], [], []
1580 lookup, modified, added, unknown, ignored = [], [], [], [], []
1581 removed, deleted, clean = [], [], []
1581 removed, deleted, clean = [], [], []
1582
1582
1583 dmap = self._map
1583 dmap = self._map
1584 dmap.preload()
1584 dmap.preload()
1585
1585
1586 use_rust = True
1586 use_rust = True
1587
1587
1588 allowed_matchers = (
1588 allowed_matchers = (
1589 matchmod.alwaysmatcher,
1589 matchmod.alwaysmatcher,
1590 matchmod.differencematcher,
1590 matchmod.differencematcher,
1591 matchmod.exactmatcher,
1591 matchmod.exactmatcher,
1592 matchmod.includematcher,
1592 matchmod.includematcher,
1593 matchmod.intersectionmatcher,
1593 matchmod.intersectionmatcher,
1594 matchmod.nevermatcher,
1594 matchmod.nevermatcher,
1595 matchmod.unionmatcher,
1595 matchmod.unionmatcher,
1596 )
1596 )
1597
1597
1598 if rustmod is None:
1598 if rustmod is None:
1599 use_rust = False
1599 use_rust = False
1600 elif self._checkcase:
1600 elif self._checkcase:
1601 # Case-insensitive filesystems are not handled yet
1601 # Case-insensitive filesystems are not handled yet
1602 use_rust = False
1602 use_rust = False
1603 elif subrepos:
1603 elif subrepos:
1604 use_rust = False
1604 use_rust = False
1605 elif not isinstance(match, allowed_matchers):
1605 elif not isinstance(match, allowed_matchers):
1606 # Some matchers have yet to be implemented
1606 # Some matchers have yet to be implemented
1607 use_rust = False
1607 use_rust = False
1608
1608
1609 # Get the time from the filesystem so we can disambiguate files that
1609 # Get the time from the filesystem so we can disambiguate files that
1610 # appear modified in the present or future.
1610 # appear modified in the present or future.
1611 try:
1611 try:
1612 mtime_boundary = timestamp.get_fs_now(self._opener)
1612 mtime_boundary = timestamp.get_fs_now(self._opener)
1613 except OSError:
1613 except OSError:
1614 # In largefiles or readonly context
1614 # In largefiles or readonly context
1615 mtime_boundary = None
1615 mtime_boundary = None
1616
1616
1617 if use_rust:
1617 if use_rust:
1618 try:
1618 try:
1619 res = self._rust_status(
1619 res = self._rust_status(
1620 match, listclean, listignored, listunknown
1620 match, listclean, listignored, listunknown
1621 )
1621 )
1622 return res + (mtime_boundary,)
1622 return res + (mtime_boundary,)
1623 except rustmod.FallbackError:
1623 except rustmod.FallbackError:
1624 pass
1624 pass
1625
1625
1626 def noop(f):
1626 def noop(f):
1627 pass
1627 pass
1628
1628
1629 dcontains = dmap.__contains__
1629 dcontains = dmap.__contains__
1630 dget = dmap.__getitem__
1630 dget = dmap.__getitem__
1631 ladd = lookup.append # aka "unsure"
1631 ladd = lookup.append # aka "unsure"
1632 madd = modified.append
1632 madd = modified.append
1633 aadd = added.append
1633 aadd = added.append
1634 uadd = unknown.append if listunknown else noop
1634 uadd = unknown.append if listunknown else noop
1635 iadd = ignored.append if listignored else noop
1635 iadd = ignored.append if listignored else noop
1636 radd = removed.append
1636 radd = removed.append
1637 dadd = deleted.append
1637 dadd = deleted.append
1638 cadd = clean.append if listclean else noop
1638 cadd = clean.append if listclean else noop
1639 mexact = match.exact
1639 mexact = match.exact
1640 dirignore = self._dirignore
1640 dirignore = self._dirignore
1641 checkexec = self._checkexec
1641 checkexec = self._checkexec
1642 checklink = self._checklink
1642 checklink = self._checklink
1643 copymap = self._map.copymap
1643 copymap = self._map.copymap
1644
1644
1645 # We need to do full walks when either
1645 # We need to do full walks when either
1646 # - we're listing all clean files, or
1646 # - we're listing all clean files, or
1647 # - match.traversedir does something, because match.traversedir should
1647 # - match.traversedir does something, because match.traversedir should
1648 # be called for every dir in the working dir
1648 # be called for every dir in the working dir
1649 full = listclean or match.traversedir is not None
1649 full = listclean or match.traversedir is not None
1650 for fn, st in self.walk(
1650 for fn, st in self.walk(
1651 match, subrepos, listunknown, listignored, full=full
1651 match, subrepos, listunknown, listignored, full=full
1652 ).items():
1652 ).items():
1653 if not dcontains(fn):
1653 if not dcontains(fn):
1654 if (listignored or mexact(fn)) and dirignore(fn):
1654 if (listignored or mexact(fn)) and dirignore(fn):
1655 if listignored:
1655 if listignored:
1656 iadd(fn)
1656 iadd(fn)
1657 else:
1657 else:
1658 uadd(fn)
1658 uadd(fn)
1659 continue
1659 continue
1660
1660
1661 t = dget(fn)
1661 t = dget(fn)
1662 mode = t.mode
1662 mode = t.mode
1663 size = t.size
1663 size = t.size
1664
1664
1665 if not st and t.tracked:
1665 if not st and t.tracked:
1666 dadd(fn)
1666 dadd(fn)
1667 elif t.p2_info:
1667 elif t.p2_info:
1668 madd(fn)
1668 madd(fn)
1669 elif t.added:
1669 elif t.added:
1670 aadd(fn)
1670 aadd(fn)
1671 elif t.removed:
1671 elif t.removed:
1672 radd(fn)
1672 radd(fn)
1673 elif t.tracked:
1673 elif t.tracked:
1674 if not checklink and t.has_fallback_symlink:
1674 if not checklink and t.has_fallback_symlink:
1675 # If the file system does not support symlink, the mode
1675 # If the file system does not support symlink, the mode
1676 # might not be correctly stored in the dirstate, so do not
1676 # might not be correctly stored in the dirstate, so do not
1677 # trust it.
1677 # trust it.
1678 ladd(fn)
1678 ladd(fn)
1679 elif not checkexec and t.has_fallback_exec:
1679 elif not checkexec and t.has_fallback_exec:
1680 # If the file system does not support exec bits, the mode
1680 # If the file system does not support exec bits, the mode
1681 # might not be correctly stored in the dirstate, so do not
1681 # might not be correctly stored in the dirstate, so do not
1682 # trust it.
1682 # trust it.
1683 ladd(fn)
1683 ladd(fn)
1684 elif (
1684 elif (
1685 size >= 0
1685 size >= 0
1686 and (
1686 and (
1687 (size != st.st_size and size != st.st_size & _rangemask)
1687 (size != st.st_size and size != st.st_size & _rangemask)
1688 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1688 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1689 )
1689 )
1690 or fn in copymap
1690 or fn in copymap
1691 ):
1691 ):
1692 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1692 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1693 # issue6456: Size returned may be longer due to
1693 # issue6456: Size returned may be longer due to
1694 # encryption on EXT-4 fscrypt, undecided.
1694 # encryption on EXT-4 fscrypt, undecided.
1695 ladd(fn)
1695 ladd(fn)
1696 else:
1696 else:
1697 madd(fn)
1697 madd(fn)
1698 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1698 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1699 # There might be a change in the future if for example the
1699 # There might be a change in the future if for example the
1700 # internal clock is off, but this is a case where the issues
1700 # internal clock is off, but this is a case where the issues
1701 # the user would face would be a lot worse and there is
1701 # the user would face would be a lot worse and there is
1702 # nothing we can really do.
1702 # nothing we can really do.
1703 ladd(fn)
1703 ladd(fn)
1704 elif listclean:
1704 elif listclean:
1705 cadd(fn)
1705 cadd(fn)
1706 status = scmutil.status(
1706 status = scmutil.status(
1707 modified, added, removed, deleted, unknown, ignored, clean
1707 modified, added, removed, deleted, unknown, ignored, clean
1708 )
1708 )
1709 return (lookup, status, mtime_boundary)
1709 return (lookup, status, mtime_boundary)
1710
1710
1711 def matches(self, match):
1711 def matches(self, match):
1712 """
1712 """
1713 return files in the dirstate (in whatever state) filtered by match
1713 return files in the dirstate (in whatever state) filtered by match
1714 """
1714 """
1715 dmap = self._map
1715 dmap = self._map
1716 if rustmod is not None:
1716 if rustmod is not None:
1717 dmap = self._map._map
1717 dmap = self._map._map
1718
1718
1719 if match.always():
1719 if match.always():
1720 return dmap.keys()
1720 return dmap.keys()
1721 files = match.files()
1721 files = match.files()
1722 if match.isexact():
1722 if match.isexact():
1723 # fast path -- filter the other way around, since typically files is
1723 # fast path -- filter the other way around, since typically files is
1724 # much smaller than dmap
1724 # much smaller than dmap
1725 return [f for f in files if f in dmap]
1725 return [f for f in files if f in dmap]
1726 if match.prefix() and all(fn in dmap for fn in files):
1726 if match.prefix() and all(fn in dmap for fn in files):
1727 # fast path -- all the values are known to be files, so just return
1727 # fast path -- all the values are known to be files, so just return
1728 # that
1728 # that
1729 return list(files)
1729 return list(files)
1730 return [f for f in dmap if match(f)]
1730 return [f for f in dmap if match(f)]
1731
1731
1732 def _actualfilename(self, tr):
1732 def _actualfilename(self, tr):
1733 if tr:
1733 if tr:
1734 return self._pendingfilename
1734 return self._pendingfilename
1735 else:
1735 else:
1736 return self._filename
1736 return self._filename
1737
1737
1738 def all_file_names(self):
1738 def all_file_names(self):
1739 """list all filename currently used by this dirstate
1739 """list all filename currently used by this dirstate
1740
1740
1741 This is only used to do `hg rollback` related backup in the transaction
1741 This is only used to do `hg rollback` related backup in the transaction
1742 """
1742 """
1743 if not self._opener.exists(self._filename):
1743 if not self._opener.exists(self._filename):
1744 # no data every written to disk yet
1744 # no data every written to disk yet
1745 return ()
1745 return ()
1746 elif self._use_dirstate_v2:
1746 elif self._use_dirstate_v2:
1747 return (
1747 return (
1748 self._filename,
1748 self._filename,
1749 self._map.docket.data_filename(),
1749 self._map.docket.data_filename(),
1750 )
1750 )
1751 else:
1751 else:
1752 return (self._filename,)
1752 return (self._filename,)
1753
1753
1754 def verify(self, m1, m2, p1, narrow_matcher=None):
1754 def verify(self, m1, m2, p1, narrow_matcher=None):
1755 """
1755 """
1756 check the dirstate contents against the parent manifest and yield errors
1756 check the dirstate contents against the parent manifest and yield errors
1757 """
1757 """
1758 missing_from_p1 = _(
1758 missing_from_p1 = _(
1759 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1759 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1760 )
1760 )
1761 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1761 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1762 missing_from_ps = _(
1762 missing_from_ps = _(
1763 b"%s marked as modified, but not in either manifest\n"
1763 b"%s marked as modified, but not in either manifest\n"
1764 )
1764 )
1765 missing_from_ds = _(
1765 missing_from_ds = _(
1766 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1766 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1767 )
1767 )
1768 for f, entry in self.items():
1768 for f, entry in self.items():
1769 if entry.p1_tracked:
1769 if entry.p1_tracked:
1770 if entry.modified and f not in m1 and f not in m2:
1770 if entry.modified and f not in m1 and f not in m2:
1771 yield missing_from_ps % f
1771 yield missing_from_ps % f
1772 elif f not in m1:
1772 elif f not in m1:
1773 yield missing_from_p1 % (f, node.short(p1))
1773 yield missing_from_p1 % (f, node.short(p1))
1774 if entry.added and f in m1:
1774 if entry.added and f in m1:
1775 yield unexpected_in_p1 % f
1775 yield unexpected_in_p1 % f
1776 for f in m1:
1776 for f in m1:
1777 if narrow_matcher is not None and not narrow_matcher(f):
1777 if narrow_matcher is not None and not narrow_matcher(f):
1778 continue
1778 continue
1779 entry = self.get_entry(f)
1779 entry = self.get_entry(f)
1780 if not entry.p1_tracked:
1780 if not entry.p1_tracked:
1781 yield missing_from_ds % (f, node.short(p1))
1781 yield missing_from_ds % (f, node.short(p1))
# End of dirstate.py excerpt (review-site comment-widget footer removed).