##// END OF EJS Templates
dirstate: add narrow support to `verify`...
Raphaël Gomès -
r50717:1d8721be default
parent child Browse files
Show More
@@ -1,1561 +1,1563 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
import collections
import contextlib
import functools
import os
import stat
import uuid
15 15 from .i18n import _
16 16 from .pycompat import delattr
17 17
18 18 from hgdemandimport import tracing
19 19
20 20 from . import (
21 21 dirstatemap,
22 22 encoding,
23 23 error,
24 24 match as matchmod,
25 25 node,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 util,
31 31 )
32 32
33 33 from .dirstateutils import (
34 34 docket as docketmod,
35 35 timestamp,
36 36 )
37 37
38 38 from .interfaces import (
39 39 dirstate as intdirstate,
40 40 util as interfaceutil,
41 41 )
42 42
# Load accelerated (C / Rust) implementations when available; the policy
# module decides which variant to import.
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# True when the Rust extension providing the dirstate logic was loaded
HAS_FAST_DIRSTATE_V2 = rustmod is not None

# Local aliases for frequently used helpers from sibling modules.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = dirstatemap.DirstateItem
53 53
54 54
class repocache(filecache):
    """A ``filecache`` variant whose file names are resolved inside ``.hg/``."""

    def join(self, obj, fname):
        # Resolve fname through the dirstate's opener, i.e. relative to .hg/
        return obj._opener.join(fname)
60 60
61 61
class rootcache(filecache):
    """A ``filecache`` variant whose file names are resolved from the repo root."""

    def join(self, obj, fname):
        # Resolve fname relative to the working directory root
        return obj._join(fname)
67 67
68 68
def requires_parents_change(func):
    """Decorator enforcing that ``func`` runs inside a parentchange context.

    Raises error.ProgrammingError when ``self.pendingparentchange()`` is
    False at call time; otherwise delegates to ``func`` unchanged.
    """

    # functools.wraps preserves the wrapped function's __name__/__doc__,
    # keeping tracebacks and introspection (and the error message built
    # from func.__name__) coherent.
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
78 78
79 79
def requires_no_parents_change(func):
    """Decorator enforcing that ``func`` runs OUTSIDE a parentchange context.

    Raises error.ProgrammingError when ``self.pendingparentchange()`` is
    True at call time; otherwise delegates to ``func`` unchanged.
    """

    # functools.wraps preserves the wrapped function's __name__/__doc__,
    # keeping tracebacks and introspection (and the error message built
    # from func.__name__) coherent.
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
89 89
90 90
91 91 @interfaceutil.implementer(intdirstate.idirstate)
92 92 class dirstate:
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
        use_tracked_hint=False,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.

        ui supplies configuration and warning output; validate is applied
        to parent nodes before they are returned by parents()/p1()/p2();
        sparsematchfn is None when sparse is disabled, otherwise a
        zero-argument callable returning the sparse matcher;
        nodeconstants provides nullid; use_dirstate_v2 and
        use_tracked_hint select on-disk format features.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._use_tracked_hint = use_tracked_hint
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        # Either build a sparse-matcher or None if sparse is disabled
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True if any internal state may be different
        self._dirty = False
        # True if the set of tracked file may be different
        self._dirty_tracked_set = False
        self._ui = ui
        self._filecache = {}
        # depth of currently active parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._filename_th = b'dirstate-tracked-hint'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        # parents as of the last write; used to fire parent-change callbacks
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
138 138
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching self._pl forces the dirstate map (and its parents) to be
        # read now rather than lazily at first use
        self._pl
145 145
146 146 @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        # entering the context marks a parent change in progress; helpers
        # decorated with requires_parents_change check this counter
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
162 162
163 163 def pendingparentchange(self):
164 164 """Returns true if the dirstate is in the middle of a set of changes
165 165 that modify the dirstate parent.
166 166 """
167 167 return self._parentwriters > 0
168 168
169 169 @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # assign to the instance attribute before returning: later accesses
        # (including any triggered while the map is being built) see the
        # same object instead of re-entering this property
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
180 180
181 181 @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.

        When sparse is disabled, return None.
        """
        if self._sparsematchfn is None:
            return None
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
197 197
198 198 @repocache(b'branch')
199 199 def _branch(self):
200 200 try:
201 201 return self._opener.read(b"branch").strip() or b"default"
202 202 except FileNotFoundError:
203 203 return b"default"
204 204
205 205 @property
    def _pl(self):
        """Pair of (unvalidated) working directory parent nodes."""
        return self._map.parents()
208 208
209 209 def hasdir(self, d):
210 210 return self._map.hastrackeddir(d)
211 211
212 212 @rootcache(b'.hgignore')
213 213 def _ignore(self):
214 214 files = self._ignorefiles()
215 215 if not files:
216 216 return matchmod.never()
217 217
218 218 pats = [b'include:%s' % f for f in files]
219 219 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
220 220
221 221 @propertycache
222 222 def _slash(self):
223 223 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
224 224
225 225 @propertycache
    def _checklink(self):
        """True if the filesystem at the repo root supports symlinks."""
        return util.checklink(self._root)
228 228
229 229 @propertycache
230 230 def _checkexec(self):
231 231 return bool(util.checkexec(self._root))
232 232
233 233 @propertycache
    def _checkcase(self):
        """True when the filesystem is case-insensitive (probed via .hg)."""
        return not util.fscasesensitive(self._join(b'.hg'))
236 236
    def _join(self, f):
        """Prefix ``f`` with the repository root directory.

        Plain concatenation: much faster than os.path.join(), and safe
        because f is always a relative path and _rootdir already ends
        with a separator.
        """
        return self._rootdir + f
241 241
    def flagfunc(self, buildfallback):
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.

        The returned callable maps a filename to b'l' (symlink), b'x'
        (executable) or b'' (neither / missing file).
        """

        # small hack to cache the result of buildfallback()
        fallback_func = []

        def get_flags(x):
            entry = None
            fallback_value = None
            # layer 1: consult the filesystem; a missing file has no flags
            try:
                st = os.lstat(self._join(x))
            except OSError:
                return b''

            if self._checklink:
                if util.statislink(st):
                    return b'l'
            else:
                # filesystem cannot represent symlinks: fall back to the
                # dirstate record (layer 2), then the parents (layer 3)
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                else:
                    if not fallback_func:
                        fallback_func.append(buildfallback())
                    fallback_value = fallback_func[0](x)
                    if b'l' in fallback_value:
                        return b'l'

            if self._checkexec:
                if util.statisexec(st):
                    return b'x'
            else:
                # same three-layer scheme for the executable bit
                if entry is None:
                    entry = self.get_entry(x)
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                else:
                    if fallback_value is None:
                        if not fallback_func:
                            fallback_func.append(buildfallback())
                        fallback_value = fallback_func[0](x)
                    if b'x' in fallback_value:
                        return b'x'
            return b''

        return get_flags
296 296
297 297 @propertycache
298 298 def _cwd(self):
299 299 # internal config: ui.forcecwd
300 300 forcecwd = self._ui.config(b'ui', b'forcecwd')
301 301 if forcecwd:
302 302 return forcecwd
303 303 return encoding.getcwd()
304 304
305 305 def getcwd(self):
306 306 """Return the path from which a canonical path is calculated.
307 307
308 308 This path should be used to resolve file patterns or to convert
309 309 canonical paths back to file paths for display. It shouldn't be
310 310 used to get real file paths. Use vfs functions instead.
311 311 """
312 312 cwd = self._cwd
313 313 if cwd == self._root:
314 314 return b''
315 315 # self._root ends with a path separator if self._root is '/' or 'C:\'
316 316 rootsep = self._root
317 317 if not util.endswithsep(rootsep):
318 318 rootsep += pycompat.ossep
319 319 if cwd.startswith(rootsep):
320 320 return cwd[len(rootsep) :]
321 321 else:
322 322 # we're outside the repo. return an absolute path.
323 323 return cwd
324 324
325 325 def pathto(self, f, cwd=None):
326 326 if cwd is None:
327 327 cwd = self.getcwd()
328 328 path = util.pathto(self._root, cwd, f)
329 329 if self._slash:
330 330 return util.pconvert(path)
331 331 return path
332 332
333 333 def get_entry(self, path):
334 334 """return a DirstateItem for the associated path"""
335 335 entry = self._map.get(path)
336 336 if entry is None:
337 337 return DirstateItem()
338 338 return entry
339 339
    def __contains__(self, key):
        """True when ``key`` has an entry in the dirstate map."""
        return key in self._map
342 342
343 343 def __iter__(self):
344 344 return iter(sorted(self._map))
345 345
    def items(self):
        """Iterate over (filename, DirstateItem) pairs."""
        return self._map.items()

    # historical alias kept for callers using the Python 2-era name
    iteritems = items
350 350
351 351 def parents(self):
352 352 return [self._validate(p) for p in self._pl]
353 353
    def p1(self):
        """Return the first working directory parent, validated."""
        return self._validate(self._pl[0])
356 356
    def p2(self):
        """Return the second working directory parent, validated."""
        return self._validate(self._pl[1])
359 359
360 360 @property
361 361 def in_merge(self):
362 362 """True if a merge is in progress"""
363 363 return self._pl[1] != self._nodeconstants.nullid
364 364
    def branch(self):
        """Return the current branch name in local encoding."""
        return encoding.tolocal(self._branch)
367 367
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents so _writedirstate can fire
            # parent-change callbacks with the right "old" value
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
393 393
    def setbranch(self, branch):
        """Persist ``branch`` (local encoding) as the current branch.

        Updates both the cached ``_branch`` value and the ``.hg/branch``
        file atomically; on failure the temp file is discarded.
        """
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise
409 409
410 410 def invalidate(self):
411 411 """Causes the next access to reread the dirstate.
412 412
413 413 This is different from localrepo.invalidatedirstate() because it always
414 414 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
415 415 check whether the dirstate has changed before rereading it."""
416 416
417 417 for a in ("_map", "_branch", "_ignore"):
418 418 if a in self.__dict__:
419 419 delattr(self, a)
420 420 self._dirty = False
421 421 self._dirty_tracked_set = False
422 422 self._parentwriters = 0
423 423 self._origpl = None
424 424
425 425 def copy(self, source, dest):
426 426 """Mark dest as a copy of source. Unmark dest if source is None."""
427 427 if source == dest:
428 428 return
429 429 self._dirty = True
430 430 if source is not None:
431 431 self._check_sparse(source)
432 432 self._map.copymap[dest] = source
433 433 else:
434 434 self._map.copymap.pop(dest, None)
435 435
    def copied(self, file):
        """Return the copy source for ``file``, or None if not a copy."""
        return self._map.copymap.get(file, None)
438 438
    def copies(self):
        """Return the full dest -> source copy map."""
        return self._map.copymap
441 441
442 442 @requires_no_parents_change
443 443 def set_tracked(self, filename, reset_copy=False):
444 444 """a "public" method for generic code to mark a file as tracked
445 445
446 446 This function is to be called outside of "update/merge" case. For
447 447 example by a command like `hg add X`.
448 448
449 449 if reset_copy is set, any existing copy information will be dropped.
450 450
451 451 return True the file was previously untracked, False otherwise.
452 452 """
453 453 self._dirty = True
454 454 entry = self._map.get(filename)
455 455 if entry is None or not entry.tracked:
456 456 self._check_new_tracked_filename(filename)
457 457 pre_tracked = self._map.set_tracked(filename)
458 458 if reset_copy:
459 459 self._map.copymap.pop(filename, None)
460 460 if pre_tracked:
461 461 self._dirty_tracked_set = True
462 462 return pre_tracked
463 463
464 464 @requires_no_parents_change
465 465 def set_untracked(self, filename):
466 466 """a "public" method for generic code to mark a file as untracked
467 467
468 468 This function is to be called outside of "update/merge" case. For
469 469 example by a command like `hg remove X`.
470 470
471 471 return True the file was previously tracked, False otherwise.
472 472 """
473 473 ret = self._map.set_untracked(filename)
474 474 if ret:
475 475 self._dirty = True
476 476 self._dirty_tracked_set = True
477 477 return ret
478 478
479 479 @requires_no_parents_change
480 480 def set_clean(self, filename, parentfiledata):
481 481 """record that the current state of the file on disk is known to be clean"""
482 482 self._dirty = True
483 483 if not self._map[filename].tracked:
484 484 self._check_new_tracked_filename(filename)
485 485 (mode, size, mtime) = parentfiledata
486 486 self._map.set_clean(filename, mode, size, mtime)
487 487
488 488 @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._map.set_possibly_dirty(filename)
493 493
494 494 @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
        )
533 533
534 534 @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        wc_tracked / p1_tracked / p2_info describe the file's status in the
        working copy, first parent and merge information; possibly_dirty
        makes the recorded mtime meaningless; parentfiledata is presumably
        a (mode, size, mtime) triple as in set_clean — TODO confirm against
        dirstatemap.reset_state.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        old_entry = self._map.get(filename)
        if old_entry is None:
            prev_tracked = False
        else:
            prev_tracked = old_entry.tracked
        if prev_tracked != wc_tracked:
            self._dirty_tracked_set = True

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
577 577
    def _check_new_tracked_filename(self, filename):
        """Validate ``filename`` before it is added to the tracked set.

        Aborts when the name is invalid, already tracked as a directory,
        clashes with a tracked file shadowing one of its ancestor
        directories, or lies outside the sparse checkout.
        """
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows: walk ancestor directories; stop early once an ancestor is
        # already known as a tracked directory (its own ancestors were
        # necessarily checked when it was added)
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
        self._check_sparse(filename)
594 594
595 595 def _check_sparse(self, filename):
596 596 """Check that a filename is inside the sparse profile"""
597 597 sparsematch = self._sparsematcher
598 598 if sparsematch is not None and not sparsematch.always():
599 599 if not sparsematch(filename):
600 600 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
601 601 hint = _(
602 602 b'include file with `hg debugsparse --include <pattern>` or use '
603 603 b'`hg add -s <file>` to include file directory while adding'
604 604 )
605 605 raise error.Abort(msg % filename, hint=hint)
606 606
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Discover the filesystem case of ``path`` and cache it in ``storemap``.

        ``normed`` is the case-normalized form of ``path``; ``exists`` is
        True/False when the caller already knows whether the path exists on
        disk, or None to probe here. Missing paths keep their original case
        (after recursively normalizing the directory part unless
        ``ignoremissing``).
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded
632 632
633 633 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
634 634 normed = util.normcase(path)
635 635 folded = self._map.filefoldmap.get(normed, None)
636 636 if folded is None:
637 637 if isknown:
638 638 folded = path
639 639 else:
640 640 folded = self._discoverpath(
641 641 path, normed, ignoremissing, exists, self._map.filefoldmap
642 642 )
643 643 return folded
644 644
645 645 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
646 646 normed = util.normcase(path)
647 647 folded = self._map.filefoldmap.get(normed, None)
648 648 if folded is None:
649 649 folded = self._map.dirfoldmap.get(normed, None)
650 650 if folded is None:
651 651 if isknown:
652 652 folded = path
653 653 else:
654 654 # store discovered result in dirfoldmap so that future
655 655 # normalizefile calls don't start matching directories
656 656 folded = self._discoverpath(
657 657 path, normed, ignoremissing, exists, self._map.dirfoldmap
658 658 )
659 659 return folded
660 660
661 661 def normalize(self, path, isknown=False, ignoremissing=False):
662 662 """
663 663 normalize the case of a pathname when on a casefolding filesystem
664 664
665 665 isknown specifies whether the filename came from walking the
666 666 disk, to avoid extra filesystem access.
667 667
668 668 If ignoremissing is True, missing path are returned
669 669 unchanged. Otherwise, we try harder to normalize possibly
670 670 existing path components.
671 671
672 672 The normalized case is determined based on the following precedence:
673 673
674 674 - version of name already stored in the dirstate
675 675 - version of name stored on disk
676 676 - version provided via command arguments
677 677 """
678 678
679 679 if self._checkcase:
680 680 return self._normalize(path, isknown, ignoremissing)
681 681 return path
682 682
    def clear(self):
        """Drop every entry from the dirstate map and mark it dirty."""
        self._map.clear()
        self._dirty = True
686 686
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild dirstate entries for ``allfiles`` under parent ``parent``.

        When ``changedfiles`` is None, the whole dirstate is rebuilt from
        scratch; otherwise only those files are refreshed: looked up when
        still present in ``allfiles``, dropped otherwise. Files outside the
        sparse matcher are filtered out and scheduled for removal.
        """

        matcher = self._sparsematcher
        if matcher is not None and not matcher.always():
            # should not add non-matching files
            allfiles = [f for f in allfiles if matcher(f)]
            if changedfiles:
                changedfiles = [f for f in changedfiles if matcher(f)]

            if changedfiles is not None:
                # these files will be deleted from the dirstate when they are
                # not found to be in allfiles
                dirstatefilestoremove = {f for f in self if not matcher(f)}
                changedfiles = dirstatefilestoremove.union(changedfiles)

        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            self.clear()
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:

            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
740 740
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
748 748
    def write(self, tr):
        """Write pending dirstate changes to disk (no-op when not dirty).

        With a transaction ``tr``, writes are registered as file generators
        and deferred to transaction finalization; otherwise the dirstate —
        and, when tracked-hint is enabled and the tracked set changed, the
        hint key file — is written immediately.
        """
        if not self._dirty:
            return

        # the tracked-hint key only needs rewriting when the tracked set
        # may have changed
        write_key = self._use_tracked_hint and self._dirty_tracked_set
        if tr:
            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate-1-main',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
                post_finalize=True,
            )
            if write_key:
                tr.addfilegenerator(
                    b'dirstate-2-key-post',
                    (self._filename_th,),
                    lambda f: self._write_tracked_hint(tr, f),
                    location=b'plain',
                    post_finalize=True,
                )
            return

        file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
        with file(self._filename) as f:
            self._writedirstate(tr, f)
        if write_key:
            # we update the key-file after writing to make sure reader have a
            # key that match the newly written content
            with file(self._filename_th) as f:
                self._write_tracked_hint(tr, f)
781 781
    def delete_tracked_hint(self):
        """remove the tracked_hint file

        To be used by format downgrades operation"""
        self._opener.unlink(self._filename_th)
        self._use_tracked_hint = False
788 788
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
799 799
    def _writedirstate(self, tr, st):
        """Serialize the dirstate map to the open file ``st``.

        Fires registered parent-change callbacks first (in sorted category
        order) when the parents changed since the last write, then resets
        the dirty flags.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.items()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        self._map.write(tr, st)
        self._dirty = False
        self._dirty_tracked_set = False
809 809
    def _write_tracked_hint(self, tr, f):
        """Write a fresh random key to the open tracked-hint file ``f``.

        Readers compare this key to detect that the tracked set changed.
        """
        key = node.hex(uuid.uuid4().bytes)
        f.write(b"1\n%s\n" % key)  # 1 is the format version
813 813
814 814 def _dirignore(self, f):
815 815 if self._ignore(f):
816 816 return True
817 817 for p in pathutil.finddirs(f):
818 818 if self._ignore(p):
819 819 return True
820 820 return False
821 821
822 822 def _ignorefiles(self):
823 823 files = []
824 824 if os.path.exists(self._join(b'.hgignore')):
825 825 files.append(self._join(b'.hgignore'))
826 826 for name, path in self._ui.configitems(b"ui"):
827 827 if name == b'ignore' or name.startswith(b'ignore.'):
828 828 # we need to use os.path.join here rather than self._join
829 829 # because path is arbitrary and user-specified
830 830 files.append(os.path.join(self._rootdir, util.expandpath(path)))
831 831 return files
832 832
    def _ignorefileandline(self, f):
        """Return (ignorefile, lineno, line) for the first pattern matching ``f``.

        Walks all ignore files breadth-first, following ``subinclude:``
        entries exactly once each, and returns (None, -1, b"") when no
        pattern matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced pattern file instead of matching
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
854 854
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # translate an unsupported stat mode into a human-readable reason
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind hot helpers to locals for the per-file loop below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo: both lists are
        # sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in results.items():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.items():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
989 989
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        The walk proceeds in three steps:
          1. stat the paths explicitly named by the matcher;
          2. recursively list the directories discovered in step 1;
          3. stat whatever dirstate entries were not reached by steps 1-2.
        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Pick the ignore predicates: listing ignored files disables
        # ignore-filtering entirely; listing neither unknown nor ignored
        # files lets us treat everything as ignored and skip step 2.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        if self._sparsematchfn is not None:
            # In a sparse checkout, restrict the walk to the sparse config
            # plus any file the caller named explicitly.
            em = matchmod.exact(match.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            match = matchmod.intersectmatchers(match, sm)

        # Hoist frequently-used attributes into locals for the hot loop below.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            # case-insensitive filesystem: every name found on disk must be
            # folded before comparison, and step 3 cannot be skipped
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except (PermissionError, FileNotFoundError) as inst:
                    match.bad(
                        self.pathto(nd), encoding.strtolocal(inst.strerror)
                    )
                    continue
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            # a dirstate entry shadowed by a directory on disk
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # special file (fifo, socket, ...) replacing a
                            # tracked path: report the entry without a stat
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # drop the sentinels installed by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1180 1180
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status through the Rust extension.

        Returns ``(lookup, status)`` where ``lookup`` lists files whose
        contents must be compared to be classified ("unsure") and ``status``
        is a ``scmutil.status`` tuple.  May raise ``rustmod.FallbackError``
        when the Rust side cannot handle the request, in which case the
        caller falls back to the pure-Python implementation.
        """
        if self._sparsematchfn is not None:
            # In a sparse checkout, restrict the walk to the sparse config
            # plus any file the caller named explicitly.
            em = matchmod.exact(matcher.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            matcher = matchmod.intersectmatchers(matcher, sm)
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # propagate any "needs a write" flag reported by the Rust code
        self._dirty |= dirty

        # replay directory-traversal callbacks that the Rust walk collected
        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            # warnings are either (file, syntax) pairs for invalid ignore
            # patterns or bare paths for unreadable pattern files
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1262 1262
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a tuple of (unsure, status, mtime_boundary),
        where status is of type scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # keep the caller's flags; the bare names are reused as result lists
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        # matcher types the Rust status implementation understands
        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.differencematcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
            matchmod.intersectionmatcher,
            matchmod.nevermatcher,
            matchmod.unionmatcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        # Get the time from the filesystem so we can disambiguate files that
        # appear modified in the present or future.
        try:
            mtime_boundary = timestamp.get_fs_now(self._opener)
        except OSError:
            # In largefiles or readonly context
            mtime_boundary = None

        if use_rust:
            try:
                res = self._rust_status(
                    match, listclean, listignored, listunknown
                )
                return res + (mtime_boundary,)
            except rustmod.FallbackError:
                # Rust could not handle this request; use the Python path.
                pass

        def noop(f):
            pass

        # Bind bound methods to locals: these run once per walked file.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        checklink = self._checklink
        copymap = self._map.copymap

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(
            match, subrepos, listunknown, listignored, full=full
        ).items():
            if not dcontains(fn):
                # file on disk but absent from the dirstate: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            t = dget(fn)
            mode = t.mode
            size = t.size

            if not st and t.tracked:
                # tracked in the dirstate but gone from disk
                dadd(fn)
            elif t.p2_info:
                # merge-related entries always need to be written out
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if not checklink and t.has_fallback_symlink:
                    # If the file system does not support symlink, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif not checkexec and t.has_fallback_exec:
                    # If the file system does not support exec bits, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                    # There might be a change in the future if for example the
                    # internal clock is off, but this is a case where the issues
                    # the user would face would be a lot worse and there is
                    # nothing we can really do.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status, mtime_boundary)
1410 1410
1411 1411 def matches(self, match):
1412 1412 """
1413 1413 return files in the dirstate (in whatever state) filtered by match
1414 1414 """
1415 1415 dmap = self._map
1416 1416 if rustmod is not None:
1417 1417 dmap = self._map._map
1418 1418
1419 1419 if match.always():
1420 1420 return dmap.keys()
1421 1421 files = match.files()
1422 1422 if match.isexact():
1423 1423 # fast path -- filter the other way around, since typically files is
1424 1424 # much smaller than dmap
1425 1425 return [f for f in files if f in dmap]
1426 1426 if match.prefix() and all(fn in dmap for fn in files):
1427 1427 # fast path -- all the values are known to be files, so just return
1428 1428 # that
1429 1429 return list(files)
1430 1430 return [f for f in dmap if match(f)]
1431 1431
1432 1432 def _actualfilename(self, tr):
1433 1433 if tr:
1434 1434 return self._pendingfilename
1435 1435 else:
1436 1436 return self._filename
1437 1437
1438 1438 def data_backup_filename(self, backupname):
1439 1439 if not self._use_dirstate_v2:
1440 1440 return None
1441 1441 return backupname + b'.v2-data'
1442 1442
1443 1443 def _new_backup_data_filename(self, backupname):
1444 1444 """return a filename to backup a data-file or None"""
1445 1445 if not self._use_dirstate_v2:
1446 1446 return None
1447 1447 data_filename = self._map.docket.data_filename()
1448 1448 return data_filename, self.data_backup_filename(backupname)
1449 1449
1450 1450 def backup_data_file(self, backupname):
1451 1451 if not self._use_dirstate_v2:
1452 1452 return None
1453 1453 docket = docketmod.DirstateDocket.parse(
1454 1454 self._opener.read(backupname),
1455 1455 self._nodeconstants,
1456 1456 )
1457 1457 return self.data_backup_filename(backupname), docket.data_filename()
1458 1458
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        Writes out any pending dirstate changes first, then hardlinks the
        (now up-to-date) dirstate file to ``backupname``.  Under dirstate-v2
        the separate data file is backed up as well.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate-1-main',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
                post_finalize=True,
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
        # dirstate-v2 stores the bulk of its data in a separate file;
        # back that one up too (pair is None under dirstate-v1)
        data_pair = self._new_backup_data_filename(backupname)
        if data_pair is not None:
            data_filename, bck_data_filename = data_pair
            util.copyfile(
                self._opener.join(data_filename),
                self._opener.join(bck_data_filename),
                hardlink=True,
            )
            if tr is not None:
                # ensure that pending file written above is unlinked at
                # failure, even if tr.writepending isn't invoked until the
                # end of this transaction
                tr.registertmp(bck_data_filename, location=b'plain')
1511 1511
    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file

        Moves ``backupname`` (created by ``savebackup``) back into place,
        including the dirstate-v2 data file when one is recorded in the
        backed-up docket.
        '''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        data_pair = self.backup_data_file(backupname)
        if util.samefile(o.join(backupname), o.join(filename)):
            # backup was a hardlink to the live file: the content is already
            # in place, just drop the extra name
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

        if data_pair is not None:
            data_backup, target = data_pair
            if o.exists(target) and util.samefile(
                o.join(data_backup), o.join(target)
            ):
                # same inode: nothing to restore, drop the backup name
                o.unlink(data_backup)
            else:
                o.rename(data_backup, target, checkambig=True)
1533 1533
1534 1534 def clearbackup(self, tr, backupname):
1535 1535 '''Clear backup file'''
1536 1536 o = self._opener
1537 1537 data_backup = self.backup_data_file(backupname)
1538 1538 o.unlink(backupname)
1539 1539
1540 1540 if data_backup is not None:
1541 1541 o.unlink(data_backup[0])
1542 1542
1543 def verify(self, m1, m2):
1543 def verify(self, m1, m2, narrow_matcher=None):
1544 1544 """check the dirstate content again the parent manifest and yield errors"""
1545 1545 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1546 1546 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1547 1547 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1548 1548 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1549 1549 for f, entry in self.items():
1550 1550 state = entry.state
1551 1551 if entry.p1_tracked:
1552 1552 if entry.modified and f not in m1 and f not in m2:
1553 1553 yield (missing_from_ps, f, state)
1554 1554 elif f not in m1:
1555 1555 yield (missing_from_p1, f, state)
1556 1556 if entry.added and f in m1:
1557 1557 yield (unexpected_in_p1, f, state)
1558 1558 for f in m1:
1559 if narrow_matcher is not None and not narrow_matcher(f):
1560 continue
1559 1561 entry = self.get_entry(f)
1560 1562 if not entry.p1_tracked:
1561 1563 yield (missing_from_ds, f, entry.state)
General Comments 0
You need to be logged in to leave comments. Login now