##// END OF EJS Templates
dirstate: remove the interface decorator to help pytype...
Matt Harbison -
r52701:c1d7ac70 default
parent child Browse files
Show More
@@ -1,1809 +1,1811 b''
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
7
7
8
8
import collections
import contextlib
import os
import stat
import uuid

from .i18n import _

from hgdemandimport import tracing

from . import (
    dirstatemap,
    encoding,
    error,
    match as matchmod,
    node,
    pathutil,
    policy,
    pycompat,
    scmutil,
    txnutil,
    util,
)

from .dirstateutils import (
    timestamp,
)

from .interfaces import (
    dirstate as intdirstate,
    util as interfaceutil,
)
41
41
# Resolve the best available implementations at import time: the C or Rust
# versions when built, falling back to pure Python otherwise.
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# True when the Rust extension providing fast dirstate-v2 support is loaded.
HAS_FAST_DIRSTATE_V2 = rustmod is not None

# Local aliases to keep call sites in this module short.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = dirstatemap.DirstateItem
52
52
53
53
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname against the repository's .hg/ opener
        return obj._opener.join(fname)
59
59
60
60
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname against the working-directory root
        return obj._join(fname)
66
66
67
67
def check_invalidated(func):
    """check that the func is called with a non-invalidated dirstate

    The dirstate is in an "invalidated state" after an error occurred during
    its modification and remains so until we exited the top level scope that
    framed such change.
    """

    def wrap(self, *args, **kwargs):
        if self._invalidated_context:
            msg = 'calling `%s` after the dirstate was invalidated'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
84
84
85
85
def requires_changing_parents(func):
    """decorator: func may only run inside a `changing_parents` context"""

    def wrap(self, *args, **kwargs):
        if not self.is_changing_parents:
            msg = 'calling `%s` outside of a changing_parents context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    # also reject calls on an invalidated dirstate
    return check_invalidated(wrap)
95
95
96
96
def requires_changing_files(func):
    """decorator: func may only run inside a `changing_files` context"""

    def wrap(self, *args, **kwargs):
        if not self.is_changing_files:
            msg = 'calling `%s` outside of a `changing_files`'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    # also reject calls on an invalidated dirstate
    return check_invalidated(wrap)
106
106
107
107
def requires_changing_any(func):
    """decorator: func may only run inside any `changing_*` context"""

    def wrap(self, *args, **kwargs):
        if not self.is_changing_any:
            msg = 'calling `%s` outside of a changing context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    # also reject calls on an invalidated dirstate
    return check_invalidated(wrap)
117
117
118
118
def requires_changing_files_or_status(func):
    """decorator: func needs a `changing_files` or `running_status` context"""

    def wrap(self, *args, **kwargs):
        if not (self.is_changing_files or self._running_status > 0):
            msg = (
                'calling `%s` outside of a changing_files '
                'or running_status context'
            )
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    # also reject calls on an invalidated dirstate
    return check_invalidated(wrap)
131
131
132
132
# Markers for the kind of change scoped by `_changing`; the two kinds are
# mutually exclusive (see `dirstate._changing`).
CHANGE_TYPE_PARENTS = "parents"
CHANGE_TYPE_FILES = "files"
135
135
136
136
137 @interfaceutil.implementer(intdirstate.idirstate)
137 class DirState:
138 class dirstate:
139 # used by largefile to avoid overwritting transaction callback
138 # used by largefile to avoid overwritting transaction callback
140 _tr_key_suffix = b''
139 _tr_key_suffix = b''
141
140
142 def __init__(
141 def __init__(
143 self,
142 self,
144 opener,
143 opener,
145 ui,
144 ui,
146 root,
145 root,
147 validate,
146 validate,
148 sparsematchfn,
147 sparsematchfn,
149 nodeconstants,
148 nodeconstants,
150 use_dirstate_v2,
149 use_dirstate_v2,
151 use_tracked_hint=False,
150 use_tracked_hint=False,
152 ):
151 ):
153 """Create a new dirstate object.
152 """Create a new dirstate object.
154
153
155 opener is an open()-like callable that can be used to open the
154 opener is an open()-like callable that can be used to open the
156 dirstate file; root is the root of the directory tracked by
155 dirstate file; root is the root of the directory tracked by
157 the dirstate.
156 the dirstate.
158 """
157 """
159 self._use_dirstate_v2 = use_dirstate_v2
158 self._use_dirstate_v2 = use_dirstate_v2
160 self._use_tracked_hint = use_tracked_hint
159 self._use_tracked_hint = use_tracked_hint
161 self._nodeconstants = nodeconstants
160 self._nodeconstants = nodeconstants
162 self._opener = opener
161 self._opener = opener
163 self._validate = validate
162 self._validate = validate
164 self._root = root
163 self._root = root
165 # Either build a sparse-matcher or None if sparse is disabled
164 # Either build a sparse-matcher or None if sparse is disabled
166 self._sparsematchfn = sparsematchfn
165 self._sparsematchfn = sparsematchfn
167 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
166 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
168 # UNC path pointing to root share (issue4557)
167 # UNC path pointing to root share (issue4557)
169 self._rootdir = pathutil.normasprefix(root)
168 self._rootdir = pathutil.normasprefix(root)
170 # True is any internal state may be different
169 # True is any internal state may be different
171 self._dirty = False
170 self._dirty = False
172 # True if the set of tracked file may be different
171 # True if the set of tracked file may be different
173 self._dirty_tracked_set = False
172 self._dirty_tracked_set = False
174 self._ui = ui
173 self._ui = ui
175 self._filecache = {}
174 self._filecache = {}
176 # nesting level of `changing_parents` context
175 # nesting level of `changing_parents` context
177 self._changing_level = 0
176 self._changing_level = 0
178 # the change currently underway
177 # the change currently underway
179 self._change_type = None
178 self._change_type = None
180 # number of open _running_status context
179 # number of open _running_status context
181 self._running_status = 0
180 self._running_status = 0
182 # True if the current dirstate changing operations have been
181 # True if the current dirstate changing operations have been
183 # invalidated (used to make sure all nested contexts have been exited)
182 # invalidated (used to make sure all nested contexts have been exited)
184 self._invalidated_context = False
183 self._invalidated_context = False
185 self._attached_to_a_transaction = False
184 self._attached_to_a_transaction = False
186 self._filename = b'dirstate'
185 self._filename = b'dirstate'
187 self._filename_th = b'dirstate-tracked-hint'
186 self._filename_th = b'dirstate-tracked-hint'
188 self._pendingfilename = b'%s.pending' % self._filename
187 self._pendingfilename = b'%s.pending' % self._filename
189 self._plchangecallbacks = {}
188 self._plchangecallbacks = {}
190 self._origpl = None
189 self._origpl = None
191 self._mapcls = dirstatemap.dirstatemap
190 self._mapcls = dirstatemap.dirstatemap
192 # Access and cache cwd early, so we don't access it for the first time
191 # Access and cache cwd early, so we don't access it for the first time
193 # after a working-copy update caused it to not exist (accessing it then
192 # after a working-copy update caused it to not exist (accessing it then
194 # raises an exception).
193 # raises an exception).
195 self._cwd
194 self._cwd
196
195
197 def refresh(self):
196 def refresh(self):
198 # XXX if this happens, you likely did not enter the `changing_xxx`
197 # XXX if this happens, you likely did not enter the `changing_xxx`
199 # using `repo.dirstate`, so a later `repo.dirstate` accesss might call
198 # using `repo.dirstate`, so a later `repo.dirstate` accesss might call
200 # `refresh`.
199 # `refresh`.
201 if self.is_changing_any:
200 if self.is_changing_any:
202 msg = "refreshing the dirstate in the middle of a change"
201 msg = "refreshing the dirstate in the middle of a change"
203 raise error.ProgrammingError(msg)
202 raise error.ProgrammingError(msg)
204 if '_branch' in vars(self):
203 if '_branch' in vars(self):
205 del self._branch
204 del self._branch
206 if '_map' in vars(self) and self._map.may_need_refresh():
205 if '_map' in vars(self) and self._map.may_need_refresh():
207 self.invalidate()
206 self.invalidate()
208
207
209 def prefetch_parents(self):
208 def prefetch_parents(self):
210 """make sure the parents are loaded
209 """make sure the parents are loaded
211
210
212 Used to avoid a race condition.
211 Used to avoid a race condition.
213 """
212 """
214 self._pl
213 self._pl
215
214
216 @contextlib.contextmanager
215 @contextlib.contextmanager
217 @check_invalidated
216 @check_invalidated
218 def running_status(self, repo):
217 def running_status(self, repo):
219 """Wrap a status operation
218 """Wrap a status operation
220
219
221 This context is not mutally exclusive with the `changing_*` context. It
220 This context is not mutally exclusive with the `changing_*` context. It
222 also do not warrant for the `wlock` to be taken.
221 also do not warrant for the `wlock` to be taken.
223
222
224 If the wlock is taken, this context will behave in a simple way, and
223 If the wlock is taken, this context will behave in a simple way, and
225 ensure the data are scheduled for write when leaving the top level
224 ensure the data are scheduled for write when leaving the top level
226 context.
225 context.
227
226
228 If the lock is not taken, it will only warrant that the data are either
227 If the lock is not taken, it will only warrant that the data are either
229 committed (written) and rolled back (invalidated) when exiting the top
228 committed (written) and rolled back (invalidated) when exiting the top
230 level context. The write/invalidate action must be performed by the
229 level context. The write/invalidate action must be performed by the
231 wrapped code.
230 wrapped code.
232
231
233
232
234 The expected logic is:
233 The expected logic is:
235
234
236 A: read the dirstate
235 A: read the dirstate
237 B: run status
236 B: run status
238 This might make the dirstate dirty by updating cache,
237 This might make the dirstate dirty by updating cache,
239 especially in Rust.
238 especially in Rust.
240 C: do more "post status fixup if relevant
239 C: do more "post status fixup if relevant
241 D: try to take the w-lock (this will invalidate the changes if they were raced)
240 D: try to take the w-lock (this will invalidate the changes if they were raced)
242 E0: if dirstate changed on disk β†’ discard change (done by dirstate internal)
241 E0: if dirstate changed on disk β†’ discard change (done by dirstate internal)
243 E1: elif lock was acquired β†’ write the changes
242 E1: elif lock was acquired β†’ write the changes
244 E2: else β†’ discard the changes
243 E2: else β†’ discard the changes
245 """
244 """
246 has_lock = repo.currentwlock() is not None
245 has_lock = repo.currentwlock() is not None
247 is_changing = self.is_changing_any
246 is_changing = self.is_changing_any
248 tr = repo.currenttransaction()
247 tr = repo.currenttransaction()
249 has_tr = tr is not None
248 has_tr = tr is not None
250 nested = bool(self._running_status)
249 nested = bool(self._running_status)
251
250
252 first_and_alone = not (is_changing or has_tr or nested)
251 first_and_alone = not (is_changing or has_tr or nested)
253
252
254 # enforce no change happened outside of a proper context.
253 # enforce no change happened outside of a proper context.
255 if first_and_alone and self._dirty:
254 if first_and_alone and self._dirty:
256 has_tr = repo.currenttransaction() is not None
255 has_tr = repo.currenttransaction() is not None
257 if not has_tr and self._changing_level == 0 and self._dirty:
256 if not has_tr and self._changing_level == 0 and self._dirty:
258 msg = "entering a status context, but dirstate is already dirty"
257 msg = "entering a status context, but dirstate is already dirty"
259 raise error.ProgrammingError(msg)
258 raise error.ProgrammingError(msg)
260
259
261 should_write = has_lock and not (nested or is_changing)
260 should_write = has_lock and not (nested or is_changing)
262
261
263 self._running_status += 1
262 self._running_status += 1
264 try:
263 try:
265 yield
264 yield
266 except Exception:
265 except Exception:
267 self.invalidate()
266 self.invalidate()
268 raise
267 raise
269 finally:
268 finally:
270 self._running_status -= 1
269 self._running_status -= 1
271 if self._invalidated_context:
270 if self._invalidated_context:
272 should_write = False
271 should_write = False
273 self.invalidate()
272 self.invalidate()
274
273
275 if should_write:
274 if should_write:
276 assert repo.currenttransaction() is tr
275 assert repo.currenttransaction() is tr
277 self.write(tr)
276 self.write(tr)
278 elif not has_lock:
277 elif not has_lock:
279 if self._dirty:
278 if self._dirty:
280 msg = b'dirstate dirty while exiting an isolated status context'
279 msg = b'dirstate dirty while exiting an isolated status context'
281 repo.ui.develwarn(msg)
280 repo.ui.develwarn(msg)
282 self.invalidate()
281 self.invalidate()
283
282
284 @contextlib.contextmanager
283 @contextlib.contextmanager
285 @check_invalidated
284 @check_invalidated
286 def _changing(self, repo, change_type):
285 def _changing(self, repo, change_type):
287 if repo.currentwlock() is None:
286 if repo.currentwlock() is None:
288 msg = b"trying to change the dirstate without holding the wlock"
287 msg = b"trying to change the dirstate without holding the wlock"
289 raise error.ProgrammingError(msg)
288 raise error.ProgrammingError(msg)
290
289
291 has_tr = repo.currenttransaction() is not None
290 has_tr = repo.currenttransaction() is not None
292 if not has_tr and self._changing_level == 0 and self._dirty:
291 if not has_tr and self._changing_level == 0 and self._dirty:
293 msg = b"entering a changing context, but dirstate is already dirty"
292 msg = b"entering a changing context, but dirstate is already dirty"
294 repo.ui.develwarn(msg)
293 repo.ui.develwarn(msg)
295
294
296 assert self._changing_level >= 0
295 assert self._changing_level >= 0
297 # different type of change are mutually exclusive
296 # different type of change are mutually exclusive
298 if self._change_type is None:
297 if self._change_type is None:
299 assert self._changing_level == 0
298 assert self._changing_level == 0
300 self._change_type = change_type
299 self._change_type = change_type
301 elif self._change_type != change_type:
300 elif self._change_type != change_type:
302 msg = (
301 msg = (
303 'trying to open "%s" dirstate-changing context while a "%s" is'
302 'trying to open "%s" dirstate-changing context while a "%s" is'
304 ' already open'
303 ' already open'
305 )
304 )
306 msg %= (change_type, self._change_type)
305 msg %= (change_type, self._change_type)
307 raise error.ProgrammingError(msg)
306 raise error.ProgrammingError(msg)
308 should_write = False
307 should_write = False
309 self._changing_level += 1
308 self._changing_level += 1
310 try:
309 try:
311 yield
310 yield
312 except: # re-raises
311 except: # re-raises
313 self.invalidate() # this will set `_invalidated_context`
312 self.invalidate() # this will set `_invalidated_context`
314 raise
313 raise
315 finally:
314 finally:
316 assert self._changing_level > 0
315 assert self._changing_level > 0
317 self._changing_level -= 1
316 self._changing_level -= 1
318 # If the dirstate is being invalidated, call invalidate again.
317 # If the dirstate is being invalidated, call invalidate again.
319 # This will throw away anything added by a upper context and
318 # This will throw away anything added by a upper context and
320 # reset the `_invalidated_context` flag when relevant
319 # reset the `_invalidated_context` flag when relevant
321 if self._changing_level <= 0:
320 if self._changing_level <= 0:
322 self._change_type = None
321 self._change_type = None
323 assert self._changing_level == 0
322 assert self._changing_level == 0
324 if self._invalidated_context:
323 if self._invalidated_context:
325 # make sure we invalidate anything an upper context might
324 # make sure we invalidate anything an upper context might
326 # have changed.
325 # have changed.
327 self.invalidate()
326 self.invalidate()
328 else:
327 else:
329 should_write = self._changing_level <= 0
328 should_write = self._changing_level <= 0
330 tr = repo.currenttransaction()
329 tr = repo.currenttransaction()
331 if has_tr != (tr is not None):
330 if has_tr != (tr is not None):
332 if has_tr:
331 if has_tr:
333 m = "transaction vanished while changing dirstate"
332 m = "transaction vanished while changing dirstate"
334 else:
333 else:
335 m = "transaction appeared while changing dirstate"
334 m = "transaction appeared while changing dirstate"
336 raise error.ProgrammingError(m)
335 raise error.ProgrammingError(m)
337 if should_write:
336 if should_write:
338 self.write(tr)
337 self.write(tr)
339
338
340 @contextlib.contextmanager
339 @contextlib.contextmanager
341 def changing_parents(self, repo):
340 def changing_parents(self, repo):
342 """Wrap a dirstate change related to a change of working copy parents
341 """Wrap a dirstate change related to a change of working copy parents
343
342
344 This context scopes a series of dirstate modifications that match an
343 This context scopes a series of dirstate modifications that match an
345 update of the working copy parents (typically `hg update`, `hg merge`
344 update of the working copy parents (typically `hg update`, `hg merge`
346 etc).
345 etc).
347
346
348 The dirstate's methods that perform this kind of modifications require
347 The dirstate's methods that perform this kind of modifications require
349 this context to be present before being called.
348 this context to be present before being called.
350 Such methods are decorated with `@requires_changing_parents`.
349 Such methods are decorated with `@requires_changing_parents`.
351
350
352 The new dirstate contents will be written to disk when the top-most
351 The new dirstate contents will be written to disk when the top-most
353 `changing_parents` context exits successfully. If an exception is
352 `changing_parents` context exits successfully. If an exception is
354 raised during a `changing_parents` context of any level, all changes
353 raised during a `changing_parents` context of any level, all changes
355 are invalidated. If this context is open within an open transaction,
354 are invalidated. If this context is open within an open transaction,
356 the dirstate writing is delayed until that transaction is successfully
355 the dirstate writing is delayed until that transaction is successfully
357 committed (and the dirstate is invalidated on transaction abort).
356 committed (and the dirstate is invalidated on transaction abort).
358
357
359 The `changing_parents` operation is mutually exclusive with the
358 The `changing_parents` operation is mutually exclusive with the
360 `changing_files` one.
359 `changing_files` one.
361 """
360 """
362 with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
361 with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
363 yield c
362 yield c
364
363
365 @contextlib.contextmanager
364 @contextlib.contextmanager
366 def changing_files(self, repo):
365 def changing_files(self, repo):
367 """Wrap a dirstate change related to the set of tracked files
366 """Wrap a dirstate change related to the set of tracked files
368
367
369 This context scopes a series of dirstate modifications that change the
368 This context scopes a series of dirstate modifications that change the
370 set of tracked files. (typically `hg add`, `hg remove` etc) or some
369 set of tracked files. (typically `hg add`, `hg remove` etc) or some
371 dirstate stored information (like `hg rename --after`) but preserve
370 dirstate stored information (like `hg rename --after`) but preserve
372 the working copy parents.
371 the working copy parents.
373
372
374 The dirstate's methods that perform this kind of modifications require
373 The dirstate's methods that perform this kind of modifications require
375 this context to be present before being called.
374 this context to be present before being called.
376 Such methods are decorated with `@requires_changing_files`.
375 Such methods are decorated with `@requires_changing_files`.
377
376
378 The new dirstate contents will be written to disk when the top-most
377 The new dirstate contents will be written to disk when the top-most
379 `changing_files` context exits successfully. If an exception is raised
378 `changing_files` context exits successfully. If an exception is raised
380 during a `changing_files` context of any level, all changes are
379 during a `changing_files` context of any level, all changes are
381 invalidated. If this context is open within an open transaction, the
380 invalidated. If this context is open within an open transaction, the
382 dirstate writing is delayed until that transaction is successfully
381 dirstate writing is delayed until that transaction is successfully
383 committed (and the dirstate is invalidated on transaction abort).
382 committed (and the dirstate is invalidated on transaction abort).
384
383
385 The `changing_files` operation is mutually exclusive with the
384 The `changing_files` operation is mutually exclusive with the
386 `changing_parents` one.
385 `changing_parents` one.
387 """
386 """
388 with self._changing(repo, CHANGE_TYPE_FILES) as c:
387 with self._changing(repo, CHANGE_TYPE_FILES) as c:
389 yield c
388 yield c
390
389
391 # here to help migration to the new code
390 # here to help migration to the new code
392 def parentchange(self):
391 def parentchange(self):
393 msg = (
392 msg = (
394 "Mercurial 6.4 and later requires call to "
393 "Mercurial 6.4 and later requires call to "
395 "`dirstate.changing_parents(repo)`"
394 "`dirstate.changing_parents(repo)`"
396 )
395 )
397 raise error.ProgrammingError(msg)
396 raise error.ProgrammingError(msg)
398
397
399 @property
398 @property
400 def is_changing_any(self):
399 def is_changing_any(self):
401 """Returns true if the dirstate is in the middle of a set of changes.
400 """Returns true if the dirstate is in the middle of a set of changes.
402
401
403 This returns True for any kind of change.
402 This returns True for any kind of change.
404 """
403 """
405 return self._changing_level > 0
404 return self._changing_level > 0
406
405
407 @property
406 @property
408 def is_changing_parents(self):
407 def is_changing_parents(self):
409 """Returns true if the dirstate is in the middle of a set of changes
408 """Returns true if the dirstate is in the middle of a set of changes
410 that modify the dirstate parent.
409 that modify the dirstate parent.
411 """
410 """
412 if self._changing_level <= 0:
411 if self._changing_level <= 0:
413 return False
412 return False
414 return self._change_type == CHANGE_TYPE_PARENTS
413 return self._change_type == CHANGE_TYPE_PARENTS
415
414
416 @property
415 @property
417 def is_changing_files(self):
416 def is_changing_files(self):
418 """Returns true if the dirstate is in the middle of a set of changes
417 """Returns true if the dirstate is in the middle of a set of changes
419 that modify the files tracked or their sources.
418 that modify the files tracked or their sources.
420 """
419 """
421 if self._changing_level <= 0:
420 if self._changing_level <= 0:
422 return False
421 return False
423 return self._change_type == CHANGE_TYPE_FILES
422 return self._change_type == CHANGE_TYPE_FILES
424
423
425 @propertycache
424 @propertycache
426 def _map(self):
425 def _map(self):
427 """Return the dirstate contents (see documentation for dirstatemap)."""
426 """Return the dirstate contents (see documentation for dirstatemap)."""
428 return self._mapcls(
427 return self._mapcls(
429 self._ui,
428 self._ui,
430 self._opener,
429 self._opener,
431 self._root,
430 self._root,
432 self._nodeconstants,
431 self._nodeconstants,
433 self._use_dirstate_v2,
432 self._use_dirstate_v2,
434 )
433 )
435
434
436 @property
435 @property
437 def _sparsematcher(self):
436 def _sparsematcher(self):
438 """The matcher for the sparse checkout.
437 """The matcher for the sparse checkout.
439
438
440 The working directory may not include every file from a manifest. The
439 The working directory may not include every file from a manifest. The
441 matcher obtained by this property will match a path if it is to be
440 matcher obtained by this property will match a path if it is to be
442 included in the working directory.
441 included in the working directory.
443
442
444 When sparse if disabled, return None.
443 When sparse if disabled, return None.
445 """
444 """
446 if self._sparsematchfn is None:
445 if self._sparsematchfn is None:
447 return None
446 return None
448 # TODO there is potential to cache this property. For now, the matcher
447 # TODO there is potential to cache this property. For now, the matcher
449 # is resolved on every access. (But the called function does use a
448 # is resolved on every access. (But the called function does use a
450 # cache to keep the lookup fast.)
449 # cache to keep the lookup fast.)
451 return self._sparsematchfn()
450 return self._sparsematchfn()
452
451
453 @repocache(b'branch')
452 @repocache(b'branch')
454 def _branch(self):
453 def _branch(self):
455 f = None
454 f = None
456 data = b''
455 data = b''
457 try:
456 try:
458 f, mode = txnutil.trypending(self._root, self._opener, b'branch')
457 f, mode = txnutil.trypending(self._root, self._opener, b'branch')
459 data = f.read().strip()
458 data = f.read().strip()
460 except FileNotFoundError:
459 except FileNotFoundError:
461 pass
460 pass
462 finally:
461 finally:
463 if f is not None:
462 if f is not None:
464 f.close()
463 f.close()
465 if not data:
464 if not data:
466 return b"default"
465 return b"default"
467 return data
466 return data
468
467
469 @property
468 @property
470 def _pl(self):
469 def _pl(self):
471 return self._map.parents()
470 return self._map.parents()
472
471
473 def hasdir(self, d):
472 def hasdir(self, d):
474 return self._map.hastrackeddir(d)
473 return self._map.hastrackeddir(d)
475
474
476 @rootcache(b'.hgignore')
475 @rootcache(b'.hgignore')
477 def _ignore(self):
476 def _ignore(self):
478 files = self._ignorefiles()
477 files = self._ignorefiles()
479 if not files:
478 if not files:
480 return matchmod.never()
479 return matchmod.never()
481
480
482 pats = [b'include:%s' % f for f in files]
481 pats = [b'include:%s' % f for f in files]
483 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
482 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
484
483
485 @propertycache
484 @propertycache
486 def _slash(self):
485 def _slash(self):
487 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
486 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
488
487
489 @propertycache
488 @propertycache
490 def _checklink(self):
489 def _checklink(self):
491 return util.checklink(self._root)
490 return util.checklink(self._root)
492
491
493 @propertycache
492 @propertycache
494 def _checkexec(self):
493 def _checkexec(self):
495 return bool(util.checkexec(self._root))
494 return bool(util.checkexec(self._root))
496
495
497 @propertycache
496 @propertycache
498 def _checkcase(self):
497 def _checkcase(self):
499 return not util.fscasesensitive(self._join(b'.hg'))
498 return not util.fscasesensitive(self._join(b'.hg'))
500
499
501 def _join(self, f):
500 def _join(self, f):
502 # much faster than os.path.join()
501 # much faster than os.path.join()
503 # it's safe because f is always a relative path
502 # it's safe because f is always a relative path
504 return self._rootdir + f
503 return self._rootdir + f
505
504
    def flagfunc(self, buildfallback):
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.

        The returned callable maps a filename to b'l', b'x' or b'' and
        returns b'' for files that cannot be stat'ed at all.
        """

        # small hack to cache the result of buildfallback()
        # (a one-element list used as a mutable cell: buildfallback() is only
        # invoked the first time layer 3 is actually needed)
        fallback_func = []

        def get_flags(x):
            entry = None
            fallback_value = None
            # layer 1: ask the filesystem
            try:
                st = os.lstat(self._join(x))
            except OSError:
                # missing/unreadable file: no flags
                return b''

            if self._checklink:
                if util.statislink(st):
                    return b'l'
            else:
                # filesystem cannot represent symlinks; fall back to the
                # dirstate entry (layer 2) or the parents (layer 3)
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                else:
                    if not fallback_func:
                        fallback_func.append(buildfallback())
                    fallback_value = fallback_func[0](x)
                    if b'l' in fallback_value:
                        return b'l'

            if self._checkexec:
                if util.statisexec(st):
                    return b'x'
            else:
                # same three-layer dance for the exec bit; reuse whatever
                # `entry`/`fallback_value` the symlink check already computed
                if entry is None:
                    entry = self.get_entry(x)
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                else:
                    if fallback_value is None:
                        if not fallback_func:
                            fallback_func.append(buildfallback())
                        fallback_value = fallback_func[0](x)
                    if b'x' in fallback_value:
                        return b'x'
            return b''

        return get_flags
560
559
561 @propertycache
560 @propertycache
562 def _cwd(self):
561 def _cwd(self):
563 # internal config: ui.forcecwd
562 # internal config: ui.forcecwd
564 forcecwd = self._ui.config(b'ui', b'forcecwd')
563 forcecwd = self._ui.config(b'ui', b'forcecwd')
565 if forcecwd:
564 if forcecwd:
566 return forcecwd
565 return forcecwd
567 return encoding.getcwd()
566 return encoding.getcwd()
568
567
569 def getcwd(self):
568 def getcwd(self):
570 """Return the path from which a canonical path is calculated.
569 """Return the path from which a canonical path is calculated.
571
570
572 This path should be used to resolve file patterns or to convert
571 This path should be used to resolve file patterns or to convert
573 canonical paths back to file paths for display. It shouldn't be
572 canonical paths back to file paths for display. It shouldn't be
574 used to get real file paths. Use vfs functions instead.
573 used to get real file paths. Use vfs functions instead.
575 """
574 """
576 cwd = self._cwd
575 cwd = self._cwd
577 if cwd == self._root:
576 if cwd == self._root:
578 return b''
577 return b''
579 # self._root ends with a path separator if self._root is '/' or 'C:\'
578 # self._root ends with a path separator if self._root is '/' or 'C:\'
580 rootsep = self._root
579 rootsep = self._root
581 if not util.endswithsep(rootsep):
580 if not util.endswithsep(rootsep):
582 rootsep += pycompat.ossep
581 rootsep += pycompat.ossep
583 if cwd.startswith(rootsep):
582 if cwd.startswith(rootsep):
584 return cwd[len(rootsep) :]
583 return cwd[len(rootsep) :]
585 else:
584 else:
586 # we're outside the repo. return an absolute path.
585 # we're outside the repo. return an absolute path.
587 return cwd
586 return cwd
588
587
589 def pathto(self, f, cwd=None):
588 def pathto(self, f, cwd=None):
590 if cwd is None:
589 if cwd is None:
591 cwd = self.getcwd()
590 cwd = self.getcwd()
592 path = util.pathto(self._root, cwd, f)
591 path = util.pathto(self._root, cwd, f)
593 if self._slash:
592 if self._slash:
594 return util.pconvert(path)
593 return util.pconvert(path)
595 return path
594 return path
596
595
597 def get_entry(self, path):
596 def get_entry(self, path):
598 """return a DirstateItem for the associated path"""
597 """return a DirstateItem for the associated path"""
599 entry = self._map.get(path)
598 entry = self._map.get(path)
600 if entry is None:
599 if entry is None:
601 return DirstateItem()
600 return DirstateItem()
602 return entry
601 return entry
603
602
604 def __contains__(self, key):
603 def __contains__(self, key):
605 return key in self._map
604 return key in self._map
606
605
607 def __iter__(self):
606 def __iter__(self):
608 return iter(sorted(self._map))
607 return iter(sorted(self._map))
609
608
610 def items(self):
609 def items(self):
611 return self._map.items()
610 return self._map.items()
612
611
613 iteritems = items
612 iteritems = items
614
613
615 def parents(self):
614 def parents(self):
616 return [self._validate(p) for p in self._pl]
615 return [self._validate(p) for p in self._pl]
617
616
618 def p1(self):
617 def p1(self):
619 return self._validate(self._pl[0])
618 return self._validate(self._pl[0])
620
619
621 def p2(self):
620 def p2(self):
622 return self._validate(self._pl[1])
621 return self._validate(self._pl[1])
623
622
624 @property
623 @property
625 def in_merge(self):
624 def in_merge(self):
626 """True if a merge is in progress"""
625 """True if a merge is in progress"""
627 return self._pl[1] != self._nodeconstants.nullid
626 return self._pl[1] != self._nodeconstants.nullid
628
627
629 def branch(self):
628 def branch(self):
630 return encoding.tolocal(self._branch)
629 return encoding.tolocal(self._branch)
631
630
    @requires_changing_parents
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        # belt-and-suspenders: the decorator should already enforce this
        if self._changing_level == 0:
            raise ValueError(
                "cannot set dirstate parent outside of "
                "dirstate.changing_parents context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        # remember the pre-change parents so invalidation can restore them
        if self._origpl is None:
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
658
657
    def setbranch(self, branch, transaction):
        """Set the working directory branch name.

        When *transaction* is not None the write is deferred to a transaction
        file generator (and undone on abort); otherwise the `branch` file is
        written immediately.
        """
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        if transaction is not None:
            self._setup_tr_abort(transaction)
            transaction.addfilegenerator(
                b'dirstate-3-branch%s' % self._tr_key_suffix,
                (b'branch',),
                self._write_branch,
                location=b'plain',
                post_finalize=True,
            )
            return

        # no transaction: write the file directly, atomically
        vfs = self._opener
        with vfs(b'branch', b'w', atomictemp=True, checkambig=True) as f:
            self._write_branch(f)
        # make sure filecache has the correct stat info for _branch after
        # replacing the underlying file
        #
        # XXX do we actually need this,
        # refreshing the attribute is quite cheap
        ce = self._filecache[b'_branch']
        if ce:
            ce.refresh()
683
682
684 def _write_branch(self, file_obj):
683 def _write_branch(self, file_obj):
685 file_obj.write(self._branch + b'\n')
684 file_obj.write(self._branch + b'\n')
686
685
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached propertycache values so they get recomputed lazily
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._dirty = False
        self._dirty_tracked_set = False
        # record that the current change context (if any) has been
        # invalidated, so further use of it can be detected/refused
        self._invalidated_context = bool(
            self._changing_level > 0
            or self._attached_to_a_transaction
            or self._running_status
        )
        self._origpl = None
705
704
706 @requires_changing_any
705 @requires_changing_any
707 def copy(self, source, dest):
706 def copy(self, source, dest):
708 """Mark dest as a copy of source. Unmark dest if source is None."""
707 """Mark dest as a copy of source. Unmark dest if source is None."""
709 if source == dest:
708 if source == dest:
710 return
709 return
711 self._dirty = True
710 self._dirty = True
712 if source is not None:
711 if source is not None:
713 self._check_sparse(source)
712 self._check_sparse(source)
714 self._map.copymap[dest] = source
713 self._map.copymap[dest] = source
715 else:
714 else:
716 self._map.copymap.pop(dest, None)
715 self._map.copymap.pop(dest, None)
717
716
718 def copied(self, file):
717 def copied(self, file):
719 return self._map.copymap.get(file, None)
718 return self._map.copymap.get(file, None)
720
719
721 def copies(self):
720 def copies(self):
722 return self._map.copymap
721 return self._map.copymap
723
722
    @requires_changing_files
    def set_tracked(self, filename, reset_copy=False):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        if reset_copy is set, any existing copy information will be dropped.

        return True if the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            # newly tracked name: validate it (clashes, sparse profile, ...)
            self._check_new_tracked_filename(filename)
        pre_tracked = self._map.set_tracked(filename)
        if reset_copy:
            self._map.copymap.pop(filename, None)
        if pre_tracked:
            self._dirty_tracked_set = True
        return pre_tracked
745
744
746 @requires_changing_files
745 @requires_changing_files
747 def set_untracked(self, filename):
746 def set_untracked(self, filename):
748 """a "public" method for generic code to mark a file as untracked
747 """a "public" method for generic code to mark a file as untracked
749
748
750 This function is to be called outside of "update/merge" case. For
749 This function is to be called outside of "update/merge" case. For
751 example by a command like `hg remove X`.
750 example by a command like `hg remove X`.
752
751
753 return True the file was previously tracked, False otherwise.
752 return True the file was previously tracked, False otherwise.
754 """
753 """
755 ret = self._map.set_untracked(filename)
754 ret = self._map.set_untracked(filename)
756 if ret:
755 if ret:
757 self._dirty = True
756 self._dirty = True
758 self._dirty_tracked_set = True
757 self._dirty_tracked_set = True
759 return ret
758 return ret
760
759
761 @requires_changing_files_or_status
760 @requires_changing_files_or_status
762 def set_clean(self, filename, parentfiledata):
761 def set_clean(self, filename, parentfiledata):
763 """record that the current state of the file on disk is known to be clean"""
762 """record that the current state of the file on disk is known to be clean"""
764 self._dirty = True
763 self._dirty = True
765 if not self._map[filename].tracked:
764 if not self._map[filename].tracked:
766 self._check_new_tracked_filename(filename)
765 self._check_new_tracked_filename(filename)
767 (mode, size, mtime) = parentfiledata
766 (mode, size, mtime) = parentfiledata
768 self._map.set_clean(filename, mode, size, mtime)
767 self._map.set_clean(filename, mode, size, mtime)
769
768
770 @requires_changing_files_or_status
769 @requires_changing_files_or_status
771 def set_possibly_dirty(self, filename):
770 def set_possibly_dirty(self, filename):
772 """record that the current state of the file on disk is unknown"""
771 """record that the current state of the file on disk is unknown"""
773 self._dirty = True
772 self._dirty = True
774 self._map.set_possibly_dirty(filename)
773 self._map.set_possibly_dirty(filename)
775
774
    @requires_changing_parents
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.changing_parents(repo):` context.
        """
        if self.in_merge:
            msg = 'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        # note: falls through here for the remaining combinations, including
        # the `elif` branch above when the entry is not "added"
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
        )
815
814
816 @requires_changing_parents
815 @requires_changing_parents
817 def update_file(
816 def update_file(
818 self,
817 self,
819 filename,
818 filename,
820 wc_tracked,
819 wc_tracked,
821 p1_tracked,
820 p1_tracked,
822 p2_info=False,
821 p2_info=False,
823 possibly_dirty=False,
822 possibly_dirty=False,
824 parentfiledata=None,
823 parentfiledata=None,
825 ):
824 ):
826 """update the information about a file in the dirstate
825 """update the information about a file in the dirstate
827
826
828 This is to be called when the direstates parent changes to keep track
827 This is to be called when the direstates parent changes to keep track
829 of what is the file situation in regards to the working copy and its parent.
828 of what is the file situation in regards to the working copy and its parent.
830
829
831 This function must be called within a `dirstate.changing_parents` context.
830 This function must be called within a `dirstate.changing_parents` context.
832
831
833 note: the API is at an early stage and we might need to adjust it
832 note: the API is at an early stage and we might need to adjust it
834 depending of what information ends up being relevant and useful to
833 depending of what information ends up being relevant and useful to
835 other processing.
834 other processing.
836 """
835 """
837 self._update_file(
836 self._update_file(
838 filename=filename,
837 filename=filename,
839 wc_tracked=wc_tracked,
838 wc_tracked=wc_tracked,
840 p1_tracked=p1_tracked,
839 p1_tracked=p1_tracked,
841 p2_info=p2_info,
840 p2_info=p2_info,
842 possibly_dirty=possibly_dirty,
841 possibly_dirty=possibly_dirty,
843 parentfiledata=parentfiledata,
842 parentfiledata=parentfiledata,
844 )
843 )
845
844
846 def hacky_extension_update_file(self, *args, **kwargs):
845 def hacky_extension_update_file(self, *args, **kwargs):
847 """NEVER USE THIS, YOU DO NOT NEED IT
846 """NEVER USE THIS, YOU DO NOT NEED IT
848
847
849 This function is a variant of "update_file" to be called by a small set
848 This function is a variant of "update_file" to be called by a small set
850 of extensions, it also adjust the internal state of file, but can be
849 of extensions, it also adjust the internal state of file, but can be
851 called outside an `changing_parents` context.
850 called outside an `changing_parents` context.
852
851
853 A very small number of extension meddle with the working copy content
852 A very small number of extension meddle with the working copy content
854 in a way that requires to adjust the dirstate accordingly. At the time
853 in a way that requires to adjust the dirstate accordingly. At the time
855 this command is written they are :
854 this command is written they are :
856 - keyword,
855 - keyword,
857 - largefile,
856 - largefile,
858 PLEASE DO NOT GROW THIS LIST ANY FURTHER.
857 PLEASE DO NOT GROW THIS LIST ANY FURTHER.
859
858
860 This function could probably be replaced by more semantic one (like
859 This function could probably be replaced by more semantic one (like
861 "adjust expected size" or "always revalidate file content", etc)
860 "adjust expected size" or "always revalidate file content", etc)
862 however at the time where this is writen, this is too much of a detour
861 however at the time where this is writen, this is too much of a detour
863 to be considered.
862 to be considered.
864 """
863 """
865 if not (self._changing_level > 0 or self._running_status > 0):
864 if not (self._changing_level > 0 or self._running_status > 0):
866 msg = "requires a changes context"
865 msg = "requires a changes context"
867 raise error.ProgrammingError(msg)
866 raise error.ProgrammingError(msg)
868 self._update_file(
867 self._update_file(
869 *args,
868 *args,
870 **kwargs,
869 **kwargs,
871 )
870 )
872
871
    def _update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """Shared implementation behind update_file and its "hacky" variant.

        Records the new tracked/parent state of *filename* in the map and
        updates the dirty flags accordingly. Callers are responsible for
        being inside an appropriate change context.
        """
        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        old_entry = self._map.get(filename)
        if old_entry is None:
            prev_tracked = False
        else:
            prev_tracked = old_entry.tracked
        if prev_tracked != wc_tracked:
            # the tracked set changed, remember it for the write-out logic
            self._dirty_tracked_set = True

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
903
902
    def _check_new_tracked_filename(self, filename):
        """Validate *filename* before it becomes tracked.

        Aborts when the name is invalid, when it clashes with a tracked
        directory or a tracked file shadowing one of its path components,
        or when it falls outside the sparse profile.
        """
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                # a tracked directory prefix means no file can shadow it;
                # deeper prefixes need not be checked
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
        self._check_sparse(filename)
920
919
921 def _check_sparse(self, filename):
920 def _check_sparse(self, filename):
922 """Check that a filename is inside the sparse profile"""
921 """Check that a filename is inside the sparse profile"""
923 sparsematch = self._sparsematcher
922 sparsematch = self._sparsematcher
924 if sparsematch is not None and not sparsematch.always():
923 if sparsematch is not None and not sparsematch.always():
925 if not sparsematch(filename):
924 if not sparsematch(filename):
926 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
925 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
927 hint = _(
926 hint = _(
928 b'include file with `hg debugsparse --include <pattern>` or use '
927 b'include file with `hg debugsparse --include <pattern>` or use '
929 b'`hg add -s <file>` to include file directory while adding'
928 b'`hg add -s <file>` to include file directory while adding'
930 )
929 )
931 raise error.Abort(msg % filename, hint=hint)
930 raise error.Abort(msg % filename, hint=hint)
932
931
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Find the on-disk/dirstate case of *path* on a casefolding filesystem.

        *normed* is the case-normalized form of *path*; *storemap* is the
        fold-map cache to populate (file or directory map). Existing paths
        are cached; missing paths are normalized component-wise (unless
        *ignoremissing*), preserving the original case of the last component.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist
            storemap[normed] = folded

        return folded
958
957
959 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
958 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
960 normed = util.normcase(path)
959 normed = util.normcase(path)
961 folded = self._map.filefoldmap.get(normed, None)
960 folded = self._map.filefoldmap.get(normed, None)
962 if folded is None:
961 if folded is None:
963 if isknown:
962 if isknown:
964 folded = path
963 folded = path
965 else:
964 else:
966 folded = self._discoverpath(
965 folded = self._discoverpath(
967 path, normed, ignoremissing, exists, self._map.filefoldmap
966 path, normed, ignoremissing, exists, self._map.filefoldmap
968 )
967 )
969 return folded
968 return folded
970
969
971 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
970 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
972 normed = util.normcase(path)
971 normed = util.normcase(path)
973 folded = self._map.filefoldmap.get(normed, None)
972 folded = self._map.filefoldmap.get(normed, None)
974 if folded is None:
973 if folded is None:
975 folded = self._map.dirfoldmap.get(normed, None)
974 folded = self._map.dirfoldmap.get(normed, None)
976 if folded is None:
975 if folded is None:
977 if isknown:
976 if isknown:
978 folded = path
977 folded = path
979 else:
978 else:
980 # store discovered result in dirfoldmap so that future
979 # store discovered result in dirfoldmap so that future
981 # normalizefile calls don't start matching directories
980 # normalizefile calls don't start matching directories
982 folded = self._discoverpath(
981 folded = self._discoverpath(
983 path, normed, ignoremissing, exists, self._map.dirfoldmap
982 path, normed, ignoremissing, exists, self._map.dirfoldmap
984 )
983 )
985 return folded
984 return folded
986
985
987 def normalize(self, path, isknown=False, ignoremissing=False):
986 def normalize(self, path, isknown=False, ignoremissing=False):
988 """
987 """
989 normalize the case of a pathname when on a casefolding filesystem
988 normalize the case of a pathname when on a casefolding filesystem
990
989
991 isknown specifies whether the filename came from walking the
990 isknown specifies whether the filename came from walking the
992 disk, to avoid extra filesystem access.
991 disk, to avoid extra filesystem access.
993
992
994 If ignoremissing is True, missing path are returned
993 If ignoremissing is True, missing path are returned
995 unchanged. Otherwise, we try harder to normalize possibly
994 unchanged. Otherwise, we try harder to normalize possibly
996 existing path components.
995 existing path components.
997
996
998 The normalized case is determined based on the following precedence:
997 The normalized case is determined based on the following precedence:
999
998
1000 - version of name already stored in the dirstate
999 - version of name already stored in the dirstate
1001 - version of name stored on disk
1000 - version of name stored on disk
1002 - version provided via command arguments
1001 - version provided via command arguments
1003 """
1002 """
1004
1003
1005 if self._checkcase:
1004 if self._checkcase:
1006 return self._normalize(path, isknown, ignoremissing)
1005 return self._normalize(path, isknown, ignoremissing)
1007 return path
1006 return path
1008
1007
1009 # XXX this method is barely used, as a result:
1008 # XXX this method is barely used, as a result:
1010 # - its semantic is unclear
1009 # - its semantic is unclear
1011 # - do we really needs it ?
1010 # - do we really needs it ?
1012 @requires_changing_parents
1011 @requires_changing_parents
1013 def clear(self):
1012 def clear(self):
1014 self._map.clear()
1013 self._map.clear()
1015 self._dirty = True
1014 self._dirty = True
1016
1015
    @requires_changing_parents
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild the dirstate against *parent*.

        *allfiles* is the full list of files in the new parent; when
        *changedfiles* is None the whole dirstate is rebuilt, otherwise only
        the given files are updated (looked up or dropped). Files outside an
        active sparse profile are filtered out / scheduled for removal.
        """
        matcher = self._sparsematcher
        if matcher is not None and not matcher.always():
            # should not add non-matching files
            allfiles = [f for f in allfiles if matcher(f)]
            if changedfiles:
                changedfiles = [f for f in changedfiles if matcher(f)]

            if changedfiles is not None:
                # these files will be deleted from the dirstate when they are
                # not found to be in allfiles
                dirstatefilestoremove = {f for f in self if not matcher(f)}
                changedfiles = dirstatefilestoremove.union(changedfiles)

        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            self.clear()
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            # many changed files: set arithmetic is worth the conversion cost
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        # remember the pre-change parents so invalidation can restore them
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
1069
1068
1070 def _setup_tr_abort(self, tr):
1069 def _setup_tr_abort(self, tr):
1071 """make sure we invalidate the current change on abort"""
1070 """make sure we invalidate the current change on abort"""
1072 if tr is None:
1071 if tr is None:
1073 return
1072 return
1074
1073
1075 def on_abort(tr):
1074 def on_abort(tr):
1076 self._attached_to_a_transaction = False
1075 self._attached_to_a_transaction = False
1077 self.invalidate()
1076 self.invalidate()
1078
1077
1079 tr.addabort(
1078 tr.addabort(
1080 b'dirstate-invalidate%s' % self._tr_key_suffix,
1079 b'dirstate-invalidate%s' % self._tr_key_suffix,
1081 on_abort,
1080 on_abort,
1082 )
1081 )
1083
1082
1084 def write(self, tr):
1083 def write(self, tr):
1085 if not self._dirty:
1084 if not self._dirty:
1086 return
1085 return
1087 # make sure we don't request a write of invalidated content
1086 # make sure we don't request a write of invalidated content
1088 # XXX move before the dirty check once `unlock` stop calling `write`
1087 # XXX move before the dirty check once `unlock` stop calling `write`
1089 assert not self._invalidated_context
1088 assert not self._invalidated_context
1090
1089
1091 write_key = self._use_tracked_hint and self._dirty_tracked_set
1090 write_key = self._use_tracked_hint and self._dirty_tracked_set
1092 if tr:
1091 if tr:
1093 self._setup_tr_abort(tr)
1092 self._setup_tr_abort(tr)
1094 self._attached_to_a_transaction = True
1093 self._attached_to_a_transaction = True
1095
1094
1096 def on_success(f):
1095 def on_success(f):
1097 self._attached_to_a_transaction = False
1096 self._attached_to_a_transaction = False
1098 self._writedirstate(tr, f),
1097 self._writedirstate(tr, f),
1099
1098
1100 # delay writing in-memory changes out
1099 # delay writing in-memory changes out
1101 tr.addfilegenerator(
1100 tr.addfilegenerator(
1102 b'dirstate-1-main%s' % self._tr_key_suffix,
1101 b'dirstate-1-main%s' % self._tr_key_suffix,
1103 (self._filename,),
1102 (self._filename,),
1104 on_success,
1103 on_success,
1105 location=b'plain',
1104 location=b'plain',
1106 post_finalize=True,
1105 post_finalize=True,
1107 )
1106 )
1108 if write_key:
1107 if write_key:
1109 tr.addfilegenerator(
1108 tr.addfilegenerator(
1110 b'dirstate-2-key-post%s' % self._tr_key_suffix,
1109 b'dirstate-2-key-post%s' % self._tr_key_suffix,
1111 (self._filename_th,),
1110 (self._filename_th,),
1112 lambda f: self._write_tracked_hint(tr, f),
1111 lambda f: self._write_tracked_hint(tr, f),
1113 location=b'plain',
1112 location=b'plain',
1114 post_finalize=True,
1113 post_finalize=True,
1115 )
1114 )
1116 return
1115 return
1117
1116
1118 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
1117 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
1119 with file(self._filename) as f:
1118 with file(self._filename) as f:
1120 self._writedirstate(tr, f)
1119 self._writedirstate(tr, f)
1121 if write_key:
1120 if write_key:
1122 # we update the key-file after writing to make sure reader have a
1121 # we update the key-file after writing to make sure reader have a
1123 # key that match the newly written content
1122 # key that match the newly written content
1124 with file(self._filename_th) as f:
1123 with file(self._filename_th) as f:
1125 self._write_tracked_hint(tr, f)
1124 self._write_tracked_hint(tr, f)
1126
1125
1127 def delete_tracked_hint(self):
1126 def delete_tracked_hint(self):
1128 """remove the tracked_hint file
1127 """remove the tracked_hint file
1129
1128
1130 To be used by format downgrades operation"""
1129 To be used by format downgrades operation"""
1131 self._opener.unlink(self._filename_th)
1130 self._opener.unlink(self._filename_th)
1132 self._use_tracked_hint = False
1131 self._use_tracked_hint = False
1133
1132
1134 def addparentchangecallback(self, category, callback):
1133 def addparentchangecallback(self, category, callback):
1135 """add a callback to be called when the wd parents are changed
1134 """add a callback to be called when the wd parents are changed
1136
1135
1137 Callback will be called with the following arguments:
1136 Callback will be called with the following arguments:
1138 dirstate, (oldp1, oldp2), (newp1, newp2)
1137 dirstate, (oldp1, oldp2), (newp1, newp2)
1139
1138
1140 Category is a unique identifier to allow overwriting an old callback
1139 Category is a unique identifier to allow overwriting an old callback
1141 with a newer callback.
1140 with a newer callback.
1142 """
1141 """
1143 self._plchangecallbacks[category] = callback
1142 self._plchangecallbacks[category] = callback
1144
1143
    def _writedirstate(self, tr, st):
        """Serialize the dirstate map to the open file object ``st``.

        Before writing, fires any registered parent-change callbacks if the
        working-directory parents changed since the last write, then clears
        the dirty flags.
        """
        # make sure we don't write invalidated content
        assert not self._invalidated_context
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            # sorted() keeps the callback invocation order deterministic
            for c, callback in sorted(self._plchangecallbacks.items()):
                callback(self, self._origpl, self._pl)
            # parents are now "in sync" with what callbacks saw
            self._origpl = None
        self._map.write(tr, st)
        self._dirty = False
        self._dirty_tracked_set = False
1156
1155
1157 def _write_tracked_hint(self, tr, f):
1156 def _write_tracked_hint(self, tr, f):
1158 key = node.hex(uuid.uuid4().bytes)
1157 key = node.hex(uuid.uuid4().bytes)
1159 f.write(b"1\n%s\n" % key) # 1 is the format version
1158 f.write(b"1\n%s\n" % key) # 1 is the format version
1160
1159
1161 def _dirignore(self, f):
1160 def _dirignore(self, f):
1162 if self._ignore(f):
1161 if self._ignore(f):
1163 return True
1162 return True
1164 for p in pathutil.finddirs(f):
1163 for p in pathutil.finddirs(f):
1165 if self._ignore(p):
1164 if self._ignore(p):
1166 return True
1165 return True
1167 return False
1166 return False
1168
1167
1169 def _ignorefiles(self):
1168 def _ignorefiles(self):
1170 files = []
1169 files = []
1171 if os.path.exists(self._join(b'.hgignore')):
1170 if os.path.exists(self._join(b'.hgignore')):
1172 files.append(self._join(b'.hgignore'))
1171 files.append(self._join(b'.hgignore'))
1173 for name, path in self._ui.configitems(b"ui"):
1172 for name, path in self._ui.configitems(b"ui"):
1174 if name == b'ignore' or name.startswith(b'ignore.'):
1173 if name == b'ignore' or name.startswith(b'ignore.'):
1175 # we need to use os.path.join here rather than self._join
1174 # we need to use os.path.join here rather than self._join
1176 # because path is arbitrary and user-specified
1175 # because path is arbitrary and user-specified
1177 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1176 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1178 return files
1177 return files
1179
1178
1180 def _ignorefileandline(self, f):
1179 def _ignorefileandline(self, f):
1181 files = collections.deque(self._ignorefiles())
1180 files = collections.deque(self._ignorefiles())
1182 visited = set()
1181 visited = set()
1183 while files:
1182 while files:
1184 i = files.popleft()
1183 i = files.popleft()
1185 patterns = matchmod.readpatternfile(
1184 patterns = matchmod.readpatternfile(
1186 i, self._ui.warn, sourceinfo=True
1185 i, self._ui.warn, sourceinfo=True
1187 )
1186 )
1188 for pattern, lineno, line in patterns:
1187 for pattern, lineno, line in patterns:
1189 kind, p = matchmod._patsplit(pattern, b'glob')
1188 kind, p = matchmod._patsplit(pattern, b'glob')
1190 if kind == b"subinclude":
1189 if kind == b"subinclude":
1191 if p not in visited:
1190 if p not in visited:
1192 files.append(p)
1191 files.append(p)
1193 continue
1192 continue
1194 m = matchmod.match(
1193 m = matchmod.match(
1195 self._root, b'', [], [pattern], warn=self._ui.warn
1194 self._root, b'', [], [pattern], warn=self._ui.warn
1196 )
1195 )
1197 if m(f):
1196 if m(f):
1198 return (i, lineno, line)
1197 return (i, lineno, line)
1199 visited.add(i)
1198 visited.add(i)
1200 return (None, -1, b"")
1199 return (None, -1, b"")
1201
1200
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # map an unsupported st_mode to a human-readable message
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # hoist frequently-used attributes to locals for the hot loop below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo: the subrepo is
        # responsible for walking them (both lists are sorted, so this is a
        # single linear merge pass)
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # sentinels: subrepos and .hg map to None so they are never walked
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:
                # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            # group result paths by their case-normalized form
            normed = {}

            for f, st in results.items():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.items():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            # wrong-case variant: keep the key but drop the stat
                            results[path] = None

        return results, dirsfound, dirsnotfound
1337
1336
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # select the ignore predicates according to what the caller wants
        # reported
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        if self._sparsematchfn is not None:
            # restrict the walk to the sparse checkout, but keep explicitly
            # requested files visible
            em = matchmod.exact(match.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            match = matchmod.intersectmatchers(match, sm)

        # hoist frequently-used attributes to locals for the hot loops below
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # iterative depth-first walk using `work` as an explicit stack
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                # b'this'/b'all' mean "no per-entry filtering needed"
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except (PermissionError, FileNotFoundError) as inst:
                    match.bad(
                        self.pathto(nd), encoding.strtolocal(inst.strerror)
                    )
                    continue
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # drop the sentinels installed by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1528
1527
1529 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1528 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1530 if self._sparsematchfn is not None:
1529 if self._sparsematchfn is not None:
1531 em = matchmod.exact(matcher.files())
1530 em = matchmod.exact(matcher.files())
1532 sm = matchmod.unionmatcher([self._sparsematcher, em])
1531 sm = matchmod.unionmatcher([self._sparsematcher, em])
1533 matcher = matchmod.intersectmatchers(matcher, sm)
1532 matcher = matchmod.intersectmatchers(matcher, sm)
1534 # Force Rayon (Rust parallelism library) to respect the number of
1533 # Force Rayon (Rust parallelism library) to respect the number of
1535 # workers. This is a temporary workaround until Rust code knows
1534 # workers. This is a temporary workaround until Rust code knows
1536 # how to read the config file.
1535 # how to read the config file.
1537 numcpus = self._ui.configint(b"worker", b"numcpus")
1536 numcpus = self._ui.configint(b"worker", b"numcpus")
1538 if numcpus is not None:
1537 if numcpus is not None:
1539 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1538 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1540
1539
1541 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1540 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1542 if not workers_enabled:
1541 if not workers_enabled:
1543 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1542 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1544
1543
1545 (
1544 (
1546 lookup,
1545 lookup,
1547 modified,
1546 modified,
1548 added,
1547 added,
1549 removed,
1548 removed,
1550 deleted,
1549 deleted,
1551 clean,
1550 clean,
1552 ignored,
1551 ignored,
1553 unknown,
1552 unknown,
1554 warnings,
1553 warnings,
1555 bad,
1554 bad,
1556 traversed,
1555 traversed,
1557 dirty,
1556 dirty,
1558 ) = rustmod.status(
1557 ) = rustmod.status(
1559 self._map._map,
1558 self._map._map,
1560 matcher,
1559 matcher,
1561 self._rootdir,
1560 self._rootdir,
1562 self._ignorefiles(),
1561 self._ignorefiles(),
1563 self._checkexec,
1562 self._checkexec,
1564 bool(list_clean),
1563 bool(list_clean),
1565 bool(list_ignored),
1564 bool(list_ignored),
1566 bool(list_unknown),
1565 bool(list_unknown),
1567 bool(matcher.traversedir),
1566 bool(matcher.traversedir),
1568 )
1567 )
1569
1568
1570 self._dirty |= dirty
1569 self._dirty |= dirty
1571
1570
1572 if matcher.traversedir:
1571 if matcher.traversedir:
1573 for dir in traversed:
1572 for dir in traversed:
1574 matcher.traversedir(dir)
1573 matcher.traversedir(dir)
1575
1574
1576 if self._ui.warn:
1575 if self._ui.warn:
1577 for item in warnings:
1576 for item in warnings:
1578 if isinstance(item, tuple):
1577 if isinstance(item, tuple):
1579 file_path, syntax = item
1578 file_path, syntax = item
1580 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1579 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1581 file_path,
1580 file_path,
1582 syntax,
1581 syntax,
1583 )
1582 )
1584 self._ui.warn(msg)
1583 self._ui.warn(msg)
1585 else:
1584 else:
1586 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1585 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1587 self._ui.warn(
1586 self._ui.warn(
1588 msg
1587 msg
1589 % (
1588 % (
1590 pathutil.canonpath(
1589 pathutil.canonpath(
1591 self._rootdir, self._rootdir, item
1590 self._rootdir, self._rootdir, item
1592 ),
1591 ),
1593 b"No such file or directory",
1592 b"No such file or directory",
1594 )
1593 )
1595 )
1594 )
1596
1595
1597 for fn, message in sorted(bad):
1596 for fn, message in sorted(bad):
1598 matcher.bad(fn, encoding.strtolocal(message))
1597 matcher.bad(fn, encoding.strtolocal(message))
1599
1598
1600 status = scmutil.status(
1599 status = scmutil.status(
1601 modified=modified,
1600 modified=modified,
1602 added=added,
1601 added=added,
1603 removed=removed,
1602 removed=removed,
1604 deleted=deleted,
1603 deleted=deleted,
1605 unknown=unknown,
1604 unknown=unknown,
1606 ignored=ignored,
1605 ignored=ignored,
1607 clean=clean,
1606 clean=clean,
1608 )
1607 )
1609 return (lookup, status)
1608 return (lookup, status)
1610
1609
1611 def status(self, match, subrepos, ignored, clean, unknown):
1610 def status(self, match, subrepos, ignored, clean, unknown):
1612 """Determine the status of the working copy relative to the
1611 """Determine the status of the working copy relative to the
1613 dirstate and return a pair of (unsure, status), where status is of type
1612 dirstate and return a pair of (unsure, status), where status is of type
1614 scmutil.status and:
1613 scmutil.status and:
1615
1614
1616 unsure:
1615 unsure:
1617 files that might have been modified since the dirstate was
1616 files that might have been modified since the dirstate was
1618 written, but need to be read to be sure (size is the same
1617 written, but need to be read to be sure (size is the same
1619 but mtime differs)
1618 but mtime differs)
1620 status.modified:
1619 status.modified:
1621 files that have definitely been modified since the dirstate
1620 files that have definitely been modified since the dirstate
1622 was written (different size or mode)
1621 was written (different size or mode)
1623 status.clean:
1622 status.clean:
1624 files that have definitely not been modified since the
1623 files that have definitely not been modified since the
1625 dirstate was written
1624 dirstate was written
1626 """
1625 """
1627 if not self._running_status:
1626 if not self._running_status:
1628 msg = "Calling `status` outside a `running_status` context"
1627 msg = "Calling `status` outside a `running_status` context"
1629 raise error.ProgrammingError(msg)
1628 raise error.ProgrammingError(msg)
1630 listignored, listclean, listunknown = ignored, clean, unknown
1629 listignored, listclean, listunknown = ignored, clean, unknown
1631 lookup, modified, added, unknown, ignored = [], [], [], [], []
1630 lookup, modified, added, unknown, ignored = [], [], [], [], []
1632 removed, deleted, clean = [], [], []
1631 removed, deleted, clean = [], [], []
1633
1632
1634 dmap = self._map
1633 dmap = self._map
1635 dmap.preload()
1634 dmap.preload()
1636
1635
1637 use_rust = True
1636 use_rust = True
1638
1637
1639 if rustmod is None:
1638 if rustmod is None:
1640 use_rust = False
1639 use_rust = False
1641 elif self._checkcase:
1640 elif self._checkcase:
1642 # Case-insensitive filesystems are not handled yet
1641 # Case-insensitive filesystems are not handled yet
1643 use_rust = False
1642 use_rust = False
1644 elif subrepos:
1643 elif subrepos:
1645 use_rust = False
1644 use_rust = False
1646
1645
1647 # Get the time from the filesystem so we can disambiguate files that
1646 # Get the time from the filesystem so we can disambiguate files that
1648 # appear modified in the present or future.
1647 # appear modified in the present or future.
1649 try:
1648 try:
1650 mtime_boundary = timestamp.get_fs_now(self._opener)
1649 mtime_boundary = timestamp.get_fs_now(self._opener)
1651 except OSError:
1650 except OSError:
1652 # In largefiles or readonly context
1651 # In largefiles or readonly context
1653 mtime_boundary = None
1652 mtime_boundary = None
1654
1653
1655 if use_rust:
1654 if use_rust:
1656 try:
1655 try:
1657 res = self._rust_status(
1656 res = self._rust_status(
1658 match, listclean, listignored, listunknown
1657 match, listclean, listignored, listunknown
1659 )
1658 )
1660 return res + (mtime_boundary,)
1659 return res + (mtime_boundary,)
1661 except rustmod.FallbackError:
1660 except rustmod.FallbackError:
1662 pass
1661 pass
1663
1662
1664 def noop(f):
1663 def noop(f):
1665 pass
1664 pass
1666
1665
1667 dcontains = dmap.__contains__
1666 dcontains = dmap.__contains__
1668 dget = dmap.__getitem__
1667 dget = dmap.__getitem__
1669 ladd = lookup.append # aka "unsure"
1668 ladd = lookup.append # aka "unsure"
1670 madd = modified.append
1669 madd = modified.append
1671 aadd = added.append
1670 aadd = added.append
1672 uadd = unknown.append if listunknown else noop
1671 uadd = unknown.append if listunknown else noop
1673 iadd = ignored.append if listignored else noop
1672 iadd = ignored.append if listignored else noop
1674 radd = removed.append
1673 radd = removed.append
1675 dadd = deleted.append
1674 dadd = deleted.append
1676 cadd = clean.append if listclean else noop
1675 cadd = clean.append if listclean else noop
1677 mexact = match.exact
1676 mexact = match.exact
1678 dirignore = self._dirignore
1677 dirignore = self._dirignore
1679 checkexec = self._checkexec
1678 checkexec = self._checkexec
1680 checklink = self._checklink
1679 checklink = self._checklink
1681 copymap = self._map.copymap
1680 copymap = self._map.copymap
1682
1681
1683 # We need to do full walks when either
1682 # We need to do full walks when either
1684 # - we're listing all clean files, or
1683 # - we're listing all clean files, or
1685 # - match.traversedir does something, because match.traversedir should
1684 # - match.traversedir does something, because match.traversedir should
1686 # be called for every dir in the working dir
1685 # be called for every dir in the working dir
1687 full = listclean or match.traversedir is not None
1686 full = listclean or match.traversedir is not None
1688 for fn, st in self.walk(
1687 for fn, st in self.walk(
1689 match, subrepos, listunknown, listignored, full=full
1688 match, subrepos, listunknown, listignored, full=full
1690 ).items():
1689 ).items():
1691 if not dcontains(fn):
1690 if not dcontains(fn):
1692 if (listignored or mexact(fn)) and dirignore(fn):
1691 if (listignored or mexact(fn)) and dirignore(fn):
1693 if listignored:
1692 if listignored:
1694 iadd(fn)
1693 iadd(fn)
1695 else:
1694 else:
1696 uadd(fn)
1695 uadd(fn)
1697 continue
1696 continue
1698
1697
1699 t = dget(fn)
1698 t = dget(fn)
1700 mode = t.mode
1699 mode = t.mode
1701 size = t.size
1700 size = t.size
1702
1701
1703 if not st and t.tracked:
1702 if not st and t.tracked:
1704 dadd(fn)
1703 dadd(fn)
1705 elif t.p2_info:
1704 elif t.p2_info:
1706 madd(fn)
1705 madd(fn)
1707 elif t.added:
1706 elif t.added:
1708 aadd(fn)
1707 aadd(fn)
1709 elif t.removed:
1708 elif t.removed:
1710 radd(fn)
1709 radd(fn)
1711 elif t.tracked:
1710 elif t.tracked:
1712 if not checklink and t.has_fallback_symlink:
1711 if not checklink and t.has_fallback_symlink:
1713 # If the file system does not support symlink, the mode
1712 # If the file system does not support symlink, the mode
1714 # might not be correctly stored in the dirstate, so do not
1713 # might not be correctly stored in the dirstate, so do not
1715 # trust it.
1714 # trust it.
1716 ladd(fn)
1715 ladd(fn)
1717 elif not checkexec and t.has_fallback_exec:
1716 elif not checkexec and t.has_fallback_exec:
1718 # If the file system does not support exec bits, the mode
1717 # If the file system does not support exec bits, the mode
1719 # might not be correctly stored in the dirstate, so do not
1718 # might not be correctly stored in the dirstate, so do not
1720 # trust it.
1719 # trust it.
1721 ladd(fn)
1720 ladd(fn)
1722 elif (
1721 elif (
1723 size >= 0
1722 size >= 0
1724 and (
1723 and (
1725 (size != st.st_size and size != st.st_size & _rangemask)
1724 (size != st.st_size and size != st.st_size & _rangemask)
1726 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1725 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1727 )
1726 )
1728 or fn in copymap
1727 or fn in copymap
1729 ):
1728 ):
1730 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1729 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1731 # issue6456: Size returned may be longer due to
1730 # issue6456: Size returned may be longer due to
1732 # encryption on EXT-4 fscrypt, undecided.
1731 # encryption on EXT-4 fscrypt, undecided.
1733 ladd(fn)
1732 ladd(fn)
1734 else:
1733 else:
1735 madd(fn)
1734 madd(fn)
1736 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1735 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1737 # There might be a change in the future if for example the
1736 # There might be a change in the future if for example the
1738 # internal clock is off, but this is a case where the issues
1737 # internal clock is off, but this is a case where the issues
1739 # the user would face would be a lot worse and there is
1738 # the user would face would be a lot worse and there is
1740 # nothing we can really do.
1739 # nothing we can really do.
1741 ladd(fn)
1740 ladd(fn)
1742 elif listclean:
1741 elif listclean:
1743 cadd(fn)
1742 cadd(fn)
1744 status = scmutil.status(
1743 status = scmutil.status(
1745 modified, added, removed, deleted, unknown, ignored, clean
1744 modified, added, removed, deleted, unknown, ignored, clean
1746 )
1745 )
1747 return (lookup, status, mtime_boundary)
1746 return (lookup, status, mtime_boundary)
1748
1747
1749 def matches(self, match):
1748 def matches(self, match):
1750 """
1749 """
1751 return files in the dirstate (in whatever state) filtered by match
1750 return files in the dirstate (in whatever state) filtered by match
1752 """
1751 """
1753 dmap = self._map
1752 dmap = self._map
1754 if rustmod is not None:
1753 if rustmod is not None:
1755 dmap = self._map._map
1754 dmap = self._map._map
1756
1755
1757 if match.always():
1756 if match.always():
1758 return dmap.keys()
1757 return dmap.keys()
1759 files = match.files()
1758 files = match.files()
1760 if match.isexact():
1759 if match.isexact():
1761 # fast path -- filter the other way around, since typically files is
1760 # fast path -- filter the other way around, since typically files is
1762 # much smaller than dmap
1761 # much smaller than dmap
1763 return [f for f in files if f in dmap]
1762 return [f for f in files if f in dmap]
1764 if match.prefix() and all(fn in dmap for fn in files):
1763 if match.prefix() and all(fn in dmap for fn in files):
1765 # fast path -- all the values are known to be files, so just return
1764 # fast path -- all the values are known to be files, so just return
1766 # that
1765 # that
1767 return list(files)
1766 return list(files)
1768 return [f for f in dmap if match(f)]
1767 return [f for f in dmap if match(f)]
1769
1768
1770 def all_file_names(self):
1769 def all_file_names(self):
1771 """list all filename currently used by this dirstate
1770 """list all filename currently used by this dirstate
1772
1771
1773 This is only used to do `hg rollback` related backup in the transaction
1772 This is only used to do `hg rollback` related backup in the transaction
1774 """
1773 """
1775 files = [b'branch']
1774 files = [b'branch']
1776 if self._opener.exists(self._filename):
1775 if self._opener.exists(self._filename):
1777 files.append(self._filename)
1776 files.append(self._filename)
1778 if self._use_dirstate_v2:
1777 if self._use_dirstate_v2:
1779 files.append(self._map.docket.data_filename())
1778 files.append(self._map.docket.data_filename())
1780 return tuple(files)
1779 return tuple(files)
1781
1780
1782 def verify(self, m1, m2, p1, narrow_matcher=None):
1781 def verify(self, m1, m2, p1, narrow_matcher=None):
1783 """
1782 """
1784 check the dirstate contents against the parent manifest and yield errors
1783 check the dirstate contents against the parent manifest and yield errors
1785 """
1784 """
1786 missing_from_p1 = _(
1785 missing_from_p1 = _(
1787 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1786 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1788 )
1787 )
1789 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1788 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1790 missing_from_ps = _(
1789 missing_from_ps = _(
1791 b"%s marked as modified, but not in either manifest\n"
1790 b"%s marked as modified, but not in either manifest\n"
1792 )
1791 )
1793 missing_from_ds = _(
1792 missing_from_ds = _(
1794 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1793 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1795 )
1794 )
1796 for f, entry in self.items():
1795 for f, entry in self.items():
1797 if entry.p1_tracked:
1796 if entry.p1_tracked:
1798 if entry.modified and f not in m1 and f not in m2:
1797 if entry.modified and f not in m1 and f not in m2:
1799 yield missing_from_ps % f
1798 yield missing_from_ps % f
1800 elif f not in m1:
1799 elif f not in m1:
1801 yield missing_from_p1 % (f, node.short(p1))
1800 yield missing_from_p1 % (f, node.short(p1))
1802 if entry.added and f in m1:
1801 if entry.added and f in m1:
1803 yield unexpected_in_p1 % f
1802 yield unexpected_in_p1 % f
1804 for f in m1:
1803 for f in m1:
1805 if narrow_matcher is not None and not narrow_matcher(f):
1804 if narrow_matcher is not None and not narrow_matcher(f):
1806 continue
1805 continue
1807 entry = self.get_entry(f)
1806 entry = self.get_entry(f)
1808 if not entry.p1_tracked:
1807 if not entry.p1_tracked:
1809 yield missing_from_ds % (f, node.short(p1))
1808 yield missing_from_ds % (f, node.short(p1))
1809
1810
1811 dirstate = interfaceutil.implementer(intdirstate.idirstate)(DirState)
General Comments 0
You need to be logged in to leave comments. Login now