##// END OF EJS Templates
dirstate: invalidate changes when parent-change fails...
marmoute -
r50852:96e526fe default
parent child Browse files
Show More
@@ -1,1570 +1,1590 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import collections
10 10 import contextlib
11 11 import os
12 12 import stat
13 13 import uuid
14 14
15 15 from .i18n import _
16 16 from .pycompat import delattr
17 17
18 18 from hgdemandimport import tracing
19 19
20 20 from . import (
21 21 dirstatemap,
22 22 encoding,
23 23 error,
24 24 match as matchmod,
25 25 node,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 util,
31 31 )
32 32
33 33 from .dirstateutils import (
34 34 docket as docketmod,
35 35 timestamp,
36 36 )
37 37
38 38 from .interfaces import (
39 39 dirstate as intdirstate,
40 40 util as interfaceutil,
41 41 )
42 42
# Optional accelerated implementations: C parsers and the Rust dirstate
# module; `policy.importrust` returns None when the Rust extension is
# unavailable.
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

HAS_FAST_DIRSTATE_V2 = rustmod is not None

# local aliases for frequently used helpers
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = dirstatemap.DirstateItem
53 53
54 54
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg directory
        return obj._opener.join(fname)
61 61
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
68 68
def requires_parents_change(func):
    """Decorate a dirstate method that may only run inside `parentchange`.

    The wrapped method raises a ProgrammingError when it is called outside
    of a `dirstate.parentchange()` context, or after the current
    dirstate-changing operation has been invalidated (an exception escaped
    an inner `parentchange` context).
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        if self._invalidated_context:
            msg = 'calling `%s` after the dirstate was invalidated'
            # bug fix: the original never interpolated the function name,
            # so the error carried a literal '%s' placeholder
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
78 81
79 82
def requires_no_parents_change(func):
    """Decorate a dirstate method that must run outside `parentchange`.

    Calling the wrapped method while a `dirstate.parentchange()` context is
    active raises a ProgrammingError.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            raise error.ProgrammingError(
                'calling `%s` inside of a parentchange context' % func.__name__
            )
        return func(self, *args, **kwargs)

    return wrap
89 92
90 93
91 94 @interfaceutil.implementer(intdirstate.idirstate)
92 95 class dirstate:
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
        use_tracked_hint=False,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.

        validate is a callable applied to each parent node before it is
        returned (see `parents`/`p1`/`p2`); sparsematchfn is either None
        (sparse disabled) or a callable returning the sparse matcher;
        nodeconstants supplies nullid; use_dirstate_v2 and use_tracked_hint
        select the on-disk formats.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._use_tracked_hint = use_tracked_hint
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        # Either build a sparse-matcher or None if sparse is disabled
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True is any internal state may be different
        self._dirty = False
        # True if the set of tracked file may be different
        self._dirty_tracked_set = False
        self._ui = ui
        self._filecache = {}
        # nesting level of `parentchange` context
        self._parentwriters = 0
        # True if the current dirstate changing operations have been
        # invalidated (used to make sure all nested contexts have been exited)
        self._invalidated_context = False
        self._filename = b'dirstate'
        self._filename_th = b'dirstate-tracked-hint'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
138 145
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching the property is enough to populate the underlying map
        self._pl
145 152
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.

        Contexts may be nested; `_parentwriters` counts the nesting depth.
        When any context (inner or outer) fails, the dirstate is
        invalidated and stays unusable until the outermost context exits.
        """
        if self._invalidated_context:
            # an earlier (sibling or inner) operation failed; the dirstate
            # must be fully reset before starting a new parent change
            msg = "trying to use an invalidated dirstate before it has reset"
            raise error.ProgrammingError(msg)
        self._parentwriters += 1
        try:
            yield
        except Exception:
            # drop the now-incoherent in-memory state so it cannot be
            # written out later
            self.invalidate()
            raise
        finally:
            if self._parentwriters > 0:
                if self._invalidated_context:
                    # make sure we invalidate anything an upper context might
                    # have changed.
                    self.invalidate()
                self._parentwriters -= 1
            # The invalidation is complete once we exit the final context
            # manager
            if self._parentwriters <= 0:
                assert self._parentwriters == 0
                self._invalidated_context = False
162 182
163 183 def pendingparentchange(self):
164 184 """Returns true if the dirstate is in the middle of a set of changes
165 185 that modify the dirstate parent.
166 186 """
167 187 return self._parentwriters > 0
168 188
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # built lazily on first access and then cached by propertycache;
        # `invalidate()` removes the attribute to force a reread
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
180 200
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.

        When sparse if disabled, return None.
        """
        if self._sparsematchfn is None:
            return None
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
197 217
    @repocache(b'branch')
    def _branch(self):
        """Current branch name as stored in .hg/branch (b"default" when the
        file is missing or empty)."""
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except FileNotFoundError:
            return b"default"
204 224
    @property
    def _pl(self):
        # raw (unvalidated) parent pair from the dirstate map
        return self._map.parents()

    def hasdir(self, d):
        # True when `d` is a directory containing tracked files
        return self._map.hastrackeddir(d)
211 231
212 232 @rootcache(b'.hgignore')
213 233 def _ignore(self):
214 234 files = self._ignorefiles()
215 235 if not files:
216 236 return matchmod.never()
217 237
218 238 pats = [b'include:%s' % f for f in files]
219 239 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
220 240
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' despite a non-'/'
        # native separator (ui.slash option)
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # True when the filesystem supports symlinks
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # True when the filesystem tracks the executable bit
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        # True on case-insensitive (casefolding) filesystems
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
241 261
    def flagfunc(self, buildfallback):
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.

        The returned callable maps a filename to b'l', b'x' or b'' and
        returns b'' for paths that cannot be lstat'ed.
        """

        # small hack to cache the result of buildfallback()
        fallback_func = []

        def get_flags(x):
            entry = None
            fallback_value = None
            try:
                st = os.lstat(self._join(x))
            except OSError:
                # missing/unreadable file: no flags
                return b''

            if self._checklink:
                # layer 1: the filesystem knows about symlinks
                if util.statislink(st):
                    return b'l'
            else:
                # layer 2: fallback symlink info recorded in the dirstate
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                else:
                    # layer 3: infer from the parents (built lazily, once)
                    if not fallback_func:
                        fallback_func.append(buildfallback())
                    fallback_value = fallback_func[0](x)
                    if b'l' in fallback_value:
                        return b'l'

            if self._checkexec:
                # layer 1: the filesystem tracks the exec bit
                if util.statisexec(st):
                    return b'x'
            else:
                # layer 2: fallback exec info recorded in the dirstate
                if entry is None:
                    entry = self.get_entry(x)
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                else:
                    # layer 3: infer from the parents (reuse cached value
                    # if the symlink path already computed it)
                    if fallback_value is None:
                        if not fallback_func:
                            fallback_func.append(buildfallback())
                        fallback_value = fallback_func[0](x)
                    if b'x' in fallback_value:
                        return b'x'
            return b''

        return get_flags
296 316
297 317 @propertycache
298 318 def _cwd(self):
299 319 # internal config: ui.forcecwd
300 320 forcecwd = self._ui.config(b'ui', b'forcecwd')
301 321 if forcecwd:
302 322 return forcecwd
303 323 return encoding.getcwd()
304 324
305 325 def getcwd(self):
306 326 """Return the path from which a canonical path is calculated.
307 327
308 328 This path should be used to resolve file patterns or to convert
309 329 canonical paths back to file paths for display. It shouldn't be
310 330 used to get real file paths. Use vfs functions instead.
311 331 """
312 332 cwd = self._cwd
313 333 if cwd == self._root:
314 334 return b''
315 335 # self._root ends with a path separator if self._root is '/' or 'C:\'
316 336 rootsep = self._root
317 337 if not util.endswithsep(rootsep):
318 338 rootsep += pycompat.ossep
319 339 if cwd.startswith(rootsep):
320 340 return cwd[len(rootsep) :]
321 341 else:
322 342 # we're outside the repo. return an absolute path.
323 343 return cwd
324 344
325 345 def pathto(self, f, cwd=None):
326 346 if cwd is None:
327 347 cwd = self.getcwd()
328 348 path = util.pathto(self._root, cwd, f)
329 349 if self._slash:
330 350 return util.pconvert(path)
331 351 return path
332 352
333 353 def get_entry(self, path):
334 354 """return a DirstateItem for the associated path"""
335 355 entry = self._map.get(path)
336 356 if entry is None:
337 357 return DirstateItem()
338 358 return entry
339 359
    def __contains__(self, key):
        # membership is delegated to the underlying dirstatemap
        return key in self._map

    def __iter__(self):
        # iterate tracked filenames in sorted order
        return iter(sorted(self._map))

    def items(self):
        return self._map.items()

    # legacy alias for callers still using the old iterator name
    iteritems = items

    def parents(self):
        # both parents, passed through the `validate` callable supplied at
        # construction time
        return [self._validate(p) for p in self._pl]

    def p1(self):
        # first working-copy parent
        return self._validate(self._pl[0])

    def p2(self):
        # second working-copy parent (nullid when no merge is in progress)
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        # current branch name in local encoding
        return encoding.tolocal(self._branch)
367 387
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries a
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            # must run inside a `parentchange` context so that failures
            # invalidate the dirstate instead of leaving it incoherent
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the original parents for the parent-change callbacks
            # fired at write time
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
393 413
    def setbranch(self, branch):
        """Record `branch` (local encoding) as the current branch, updating
        both the in-memory cache and the .hg/branch file."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # abandon the atomictemp file so a partial write never replaces
            # the existing branch file
            f.discard()
            raise
409 429
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the propertycache/filecache attributes so they are rebuilt
        # from disk on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._dirty = False
        self._dirty_tracked_set = False
        # when a parent-change operation is in progress, flag the whole
        # operation as invalidated; `parentchange` clears the flag once the
        # outermost context exits
        self._invalidated_context = self._parentwriters > 0
        self._origpl = None
424 444
425 445 def copy(self, source, dest):
426 446 """Mark dest as a copy of source. Unmark dest if source is None."""
427 447 if source == dest:
428 448 return
429 449 self._dirty = True
430 450 if source is not None:
431 451 self._check_sparse(source)
432 452 self._map.copymap[dest] = source
433 453 else:
434 454 self._map.copymap.pop(dest, None)
435 455
    def copied(self, file):
        # copy source for `file`, or None when it is not a copy
        return self._map.copymap.get(file, None)

    def copies(self):
        # full {dest: source} copy map
        return self._map.copymap
441 461
    @requires_no_parents_change
    def set_tracked(self, filename, reset_copy=False):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        if reset_copy is set, any existing copy information will be dropped.

        return True the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            # newly tracked names must not clash with existing files or
            # directories, and must fit the sparse profile
            self._check_new_tracked_filename(filename)
        pre_tracked = self._map.set_tracked(filename)
        if reset_copy:
            self._map.copymap.pop(filename, None)
        if pre_tracked:
            # the set of tracked files changed: the tracked-hint key must
            # be rewritten
            self._dirty_tracked_set = True
        return pre_tracked
463 483
464 484 @requires_no_parents_change
465 485 def set_untracked(self, filename):
466 486 """a "public" method for generic code to mark a file as untracked
467 487
468 488 This function is to be called outside of "update/merge" case. For
469 489 example by a command like `hg remove X`.
470 490
471 491 return True the file was previously tracked, False otherwise.
472 492 """
473 493 ret = self._map.set_untracked(filename)
474 494 if ret:
475 495 self._dirty = True
476 496 self._dirty_tracked_set = True
477 497 return ret
478 498
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata):
        """record that the current state of the file on disk is known to be clean

        parentfiledata is a (mode, size, mtime) triple describing the
        on-disk file.
        """
        self._dirty = True
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        (mode, size, mtime) = parentfiledata
        self._map.set_clean(filename, mode, size, mtime)
487 507
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._map.set_possibly_dirty(filename)
493 513
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
        )
533 553
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the direstates parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        old_entry = self._map.get(filename)
        if old_entry is None:
            prev_tracked = False
        else:
            prev_tracked = old_entry.tracked
        if prev_tracked != wc_tracked:
            # the tracked set changed: the tracked-hint key must be rewritten
            self._dirty_tracked_set = True

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
577 597
    def _check_new_tracked_filename(self, filename):
        """Abort if `filename` cannot become a newly tracked file.

        Rejects invalid names, names shadowed by a tracked directory, names
        whose parent directory clashes with a tracked file, and names outside
        the sparse profile.
        """
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
        self._check_sparse(filename)
594 614
595 615 def _check_sparse(self, filename):
596 616 """Check that a filename is inside the sparse profile"""
597 617 sparsematch = self._sparsematcher
598 618 if sparsematch is not None and not sparsematch.always():
599 619 if not sparsematch(filename):
600 620 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
601 621 hint = _(
602 622 b'include file with `hg debugsparse --include <pattern>` or use '
603 623 b'`hg add -s <file>` to include file directory while adding'
604 624 )
605 625 raise error.Abort(msg % filename, hint=hint)
606 626
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Resolve the on-disk case of `path` and cache it in `storemap`.

        `normed` is the case-normalized form of `path`; `exists` (tri-state:
        True/False/None) may pre-answer whether the path exists on disk.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results backed by an actual on-disk lookup
            storemap[normed] = folded

        return folded
632 652
633 653 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
634 654 normed = util.normcase(path)
635 655 folded = self._map.filefoldmap.get(normed, None)
636 656 if folded is None:
637 657 if isknown:
638 658 folded = path
639 659 else:
640 660 folded = self._discoverpath(
641 661 path, normed, ignoremissing, exists, self._map.filefoldmap
642 662 )
643 663 return folded
644 664
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Case-normalize `path`, consulting the file foldmap first, then the
        directory foldmap, then on-disk discovery."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
660 680
661 681 def normalize(self, path, isknown=False, ignoremissing=False):
662 682 """
663 683 normalize the case of a pathname when on a casefolding filesystem
664 684
665 685 isknown specifies whether the filename came from walking the
666 686 disk, to avoid extra filesystem access.
667 687
668 688 If ignoremissing is True, missing path are returned
669 689 unchanged. Otherwise, we try harder to normalize possibly
670 690 existing path components.
671 691
672 692 The normalized case is determined based on the following precedence:
673 693
674 694 - version of name already stored in the dirstate
675 695 - version of name stored on disk
676 696 - version provided via command arguments
677 697 """
678 698
679 699 if self._checkcase:
680 700 return self._normalize(path, isknown, ignoremissing)
681 701 return path
682 702
    def clear(self):
        """Drop every entry from the dirstate map and mark it dirty."""
        self._map.clear()
        self._dirty = True
686 706
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild the dirstate against `parent`.

        `allfiles` is the full list of files in the target revision;
        `changedfiles`, when given, limits the rebuild to those names
        (everything else is assumed unchanged).
        """

        matcher = self._sparsematcher
        if matcher is not None and not matcher.always():
            # should not add non-matching files
            allfiles = [f for f in allfiles if matcher(f)]
            if changedfiles:
                changedfiles = [f for f in changedfiles if matcher(f)]

            if changedfiles is not None:
                # these files will be deleted from the dirstate when they are
                # not found to be in allfiles
                dirstatefilestoremove = {f for f in self if not matcher(f)}
                changedfiles = dirstatefilestoremove.union(changedfiles)

        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            self.clear()
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:

            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
740 760
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
748 768
    def write(self, tr):
        """Flush in-memory dirstate changes to disk.

        With a transaction `tr`, the actual writes are registered as file
        generators and deferred to transaction finalization; without one,
        the files are written immediately.
        """
        if not self._dirty:
            return

        # the tracked-hint key only needs rewriting when the tracked set
        # changed and the feature is enabled
        write_key = self._use_tracked_hint and self._dirty_tracked_set
        if tr:
            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate-1-main',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
                post_finalize=True,
            )
            if write_key:
                tr.addfilegenerator(
                    b'dirstate-2-key-post',
                    (self._filename_th,),
                    lambda f: self._write_tracked_hint(tr, f),
                    location=b'plain',
                    post_finalize=True,
                )
            return

        file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
        with file(self._filename) as f:
            self._writedirstate(tr, f)
        if write_key:
            # we update the key-file after writing to make sure reader have a
            # key that match the newly written content
            with file(self._filename_th) as f:
                self._write_tracked_hint(tr, f)
781 801
    def delete_tracked_hint(self):
        """remove the tracked_hint file

        To be used by format downgrades operation"""
        self._opener.unlink(self._filename_th)
        self._use_tracked_hint = False

    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

    def _writedirstate(self, tr, st):
        """Serialize the dirstate map to the open file `st` (within
        transaction `tr`), firing parent-change callbacks first."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.items()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        self._map.write(tr, st)
        # everything is flushed: the in-memory state is clean again
        self._dirty = False
        self._dirty_tracked_set = False

    def _write_tracked_hint(self, tr, f):
        # a fresh random key tells readers their cached tracked-set is stale
        key = node.hex(uuid.uuid4().bytes)
        f.write(b"1\n%s\n" % key)  # 1 is the format version
813 833
814 834 def _dirignore(self, f):
815 835 if self._ignore(f):
816 836 return True
817 837 for p in pathutil.finddirs(f):
818 838 if self._ignore(p):
819 839 return True
820 840 return False
821 841
822 842 def _ignorefiles(self):
823 843 files = []
824 844 if os.path.exists(self._join(b'.hgignore')):
825 845 files.append(self._join(b'.hgignore'))
826 846 for name, path in self._ui.configitems(b"ui"):
827 847 if name == b'ignore' or name.startswith(b'ignore.'):
828 848 # we need to use os.path.join here rather than self._join
829 849 # because path is arbitrary and user-specified
830 850 files.append(os.path.join(self._rootdir, util.expandpath(path)))
831 851 return files
832 852
    def _ignorefileandline(self, f):
        """Return (ignorefile, lineno, line) for the rule that ignores `f`.

        Walks every ignore file (following `subinclude` directives once each)
        and returns (None, -1, b"") when no pattern matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced file, but only once
                    if p not in visited:
                        files.append(p)
                    continue
                # test each pattern individually so we can report the exact
                # file/line responsible
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
854 874
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for unsupported file types
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # hoist frequently used names to locals for the loop below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # discard any files inside a subrepo; the subrepo handles them
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # sentinels: subrepos and .hg map to None to stop deeper walks
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in results.items():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.items():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
989 1009
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        ``unknown`` and ``ignored`` select whether unknown resp. ignored
        files should be listed; ``subrepos`` entries are used as sentinels
        during the walk and removed from the result before returning.
        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # select the ignore predicates for this walk:
        # - listing ignored files: ignore nothing
        # - listing unknown files: honour the real ignore rules
        # - neither: ignore everything, which skips dir recursion and step 3
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        if self._sparsematchfn is not None:
            # restrict the walk to the sparse checkout, while still letting
            # explicitly named files through
            em = matchmod.exact(match.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            match = matchmod.intersectmatchers(match, sm)

        # hoist frequently used attributes/bound methods into locals; the
        # traversal below touches them once per directory entry
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # iterative depth-first walk of one directory subtree; ``work``
            # is used as an explicit stack of directories still to visit
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except (PermissionError, FileNotFoundError) as inst:
                    match.bad(
                        self.pathto(nd), encoding.strtolocal(inst.strerror)
                    )
                    continue
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # tracked but unsupported on-disk type (fifo,
                            # socket, ...): report it with no stat info
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # drop the sentinels installed by _walkexplicit before returning
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1180 1200
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status via the Rust extension.

        Returns a ``(lookup, status)`` pair shaped like the first two items
        of ``status()``'s return value.  May raise ``rustmod.FallbackError``,
        which the caller (``status()``) catches to fall back to the pure
        Python implementation.
        """
        if self._sparsematchfn is not None:
            # restrict the walk to the sparse checkout, while still letting
            # explicitly named files through
            em = matchmod.exact(matcher.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            matcher = matchmod.intersectmatchers(matcher, sm)
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # propagate dirtiness reported by the Rust side so the dirstate
        # gets written out later
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        # NOTE(review): ``self._ui.warn`` is a bound method and thus always
        # truthy, so this guard never skips the loop — confirm intent.
        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax) pairs flag invalid ignore patterns
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare paths flag unreadable ignore files
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1262 1282
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

        unsure:
          files that might have been modified since the dirstate was
          written, but need to be read to be sure (size is the same
          but mtime differs)
        status.modified:
          files that have definitely been modified since the dirstate
          was written (different size or mode)
        status.clean:
          files that have definitely not been modified since the
          dirstate was written

        The actual return value is a 3-tuple ``(unsure, status,
        mtime_boundary)``; ``mtime_boundary`` is the filesystem "now" used
        to disambiguate files modified in the present/future (or None when
        it could not be read).
        """
        # the boolean parameters are saved under list* names because the
        # plain names are reused below as the result lists
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.differencematcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
            matchmod.intersectionmatcher,
            matchmod.nevermatcher,
            matchmod.unionmatcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        # Get the time from the filesystem so we can disambiguate files that
        # appear modified in the present or future.
        try:
            mtime_boundary = timestamp.get_fs_now(self._opener)
        except OSError:
            # In largefiles or readonly context
            mtime_boundary = None

        if use_rust:
            try:
                res = self._rust_status(
                    match, listclean, listignored, listunknown
                )
                return res + (mtime_boundary,)
            except rustmod.FallbackError:
                # the Rust fast path could not handle this request; fall
                # through to the pure Python implementation below
                pass

        def noop(f):
            pass

        # bind list.append (and a no-op for categories not requested) into
        # locals; the classification loop below calls these per file
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        checklink = self._checklink
        copymap = self._map.copymap

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(
            match, subrepos, listunknown, listignored, full=full
        ).items():
            if not dcontains(fn):
                # file on disk but absent from the dirstate: unknown or
                # ignored
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            t = dget(fn)
            mode = t.mode
            size = t.size

            if not st and t.tracked:
                # tracked in the dirstate but gone from disk
                dadd(fn)
            elif t.p2_info:
                # entries carrying second-parent info are reported modified
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if not checklink and t.has_fallback_symlink:
                    # If the file system does not support symlink, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif not checkexec and t.has_fallback_exec:
                    # If the file system does not support exec bits, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                    # There might be a change in the future if for example the
                    # internal clock is off, but this is a case where the issues
                    # the user would face would be a lot worse and there is
                    # nothing we can really do.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status, mtime_boundary)
1410 1430
1411 1431 def matches(self, match):
1412 1432 """
1413 1433 return files in the dirstate (in whatever state) filtered by match
1414 1434 """
1415 1435 dmap = self._map
1416 1436 if rustmod is not None:
1417 1437 dmap = self._map._map
1418 1438
1419 1439 if match.always():
1420 1440 return dmap.keys()
1421 1441 files = match.files()
1422 1442 if match.isexact():
1423 1443 # fast path -- filter the other way around, since typically files is
1424 1444 # much smaller than dmap
1425 1445 return [f for f in files if f in dmap]
1426 1446 if match.prefix() and all(fn in dmap for fn in files):
1427 1447 # fast path -- all the values are known to be files, so just return
1428 1448 # that
1429 1449 return list(files)
1430 1450 return [f for f in dmap if match(f)]
1431 1451
1432 1452 def _actualfilename(self, tr):
1433 1453 if tr:
1434 1454 return self._pendingfilename
1435 1455 else:
1436 1456 return self._filename
1437 1457
1438 1458 def data_backup_filename(self, backupname):
1439 1459 if not self._use_dirstate_v2:
1440 1460 return None
1441 1461 return backupname + b'.v2-data'
1442 1462
1443 1463 def _new_backup_data_filename(self, backupname):
1444 1464 """return a filename to backup a data-file or None"""
1445 1465 if not self._use_dirstate_v2:
1446 1466 return None
1447 1467 data_filename = self._map.docket.data_filename()
1448 1468 return data_filename, self.data_backup_filename(backupname)
1449 1469
1450 1470 def backup_data_file(self, backupname):
1451 1471 if not self._use_dirstate_v2:
1452 1472 return None
1453 1473 docket = docketmod.DirstateDocket.parse(
1454 1474 self._opener.read(backupname),
1455 1475 self._nodeconstants,
1456 1476 )
1457 1477 return self.data_backup_filename(backupname), docket.data_filename()
1458 1478
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` is the active transaction (or None); ``backupname`` is the
        name, relative to ``self._opener``, under which the backup is
        created.  For dirstate-v2, the data file is backed up as well.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate-1-main',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
                post_finalize=True,
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # remove any stale backup before hardlinking over it
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
        # for dirstate-v2, also back up the separate data file
        data_pair = self._new_backup_data_filename(backupname)
        if data_pair is not None:
            data_filename, bck_data_filename = data_pair
            util.copyfile(
                self._opener.join(data_filename),
                self._opener.join(bck_data_filename),
                hardlink=True,
            )
            if tr is not None:
                # ensure that pending file written above is unlinked at
                # failure, even if tr.writepending isn't invoked until the
                # end of this transaction
                tr.registertmp(bck_data_filename, location=b'plain')
1511 1531
    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file

        Moves (or, if already the same file, simply drops) the backup
        created by ``savebackup`` back into place, including the
        dirstate-v2 data file when applicable.
        '''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        data_pair = self.backup_data_file(backupname)
        if util.samefile(o.join(backupname), o.join(filename)):
            # backup is a hardlink to the current file: nothing to move
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

        if data_pair is not None:
            data_backup, target = data_pair
            if o.exists(target) and util.samefile(
                o.join(data_backup), o.join(target)
            ):
                # backup data file is a hardlink to the live one: just drop it
                o.unlink(data_backup)
            else:
                o.rename(data_backup, target, checkambig=True)
1533 1553
1534 1554 def clearbackup(self, tr, backupname):
1535 1555 '''Clear backup file'''
1536 1556 o = self._opener
1537 1557 data_backup = self.backup_data_file(backupname)
1538 1558 o.unlink(backupname)
1539 1559
1540 1560 if data_backup is not None:
1541 1561 o.unlink(data_backup[0])
1542 1562
1543 1563 def verify(self, m1, m2, p1, narrow_matcher=None):
1544 1564 """
1545 1565 check the dirstate contents against the parent manifest and yield errors
1546 1566 """
1547 1567 missing_from_p1 = _(
1548 1568 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1549 1569 )
1550 1570 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1551 1571 missing_from_ps = _(
1552 1572 b"%s marked as modified, but not in either manifest\n"
1553 1573 )
1554 1574 missing_from_ds = _(
1555 1575 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1556 1576 )
1557 1577 for f, entry in self.items():
1558 1578 if entry.p1_tracked:
1559 1579 if entry.modified and f not in m1 and f not in m2:
1560 1580 yield missing_from_ps % f
1561 1581 elif f not in m1:
1562 1582 yield missing_from_p1 % (f, node.short(p1))
1563 1583 if entry.added and f in m1:
1564 1584 yield unexpected_in_p1 % f
1565 1585 for f in m1:
1566 1586 if narrow_matcher is not None and not narrow_matcher(f):
1567 1587 continue
1568 1588 entry = self.get_entry(f)
1569 1589 if not entry.p1_tracked:
1570 1590 yield missing_from_ds % (f, node.short(p1))
General Comments 0
You need to be logged in to leave comments. Login now