##// END OF EJS Templates
cleanup: drop `dirstate.is_changing_parent` deprecated since 6.5...
marmoute -
r52028:88ef8021 default
parent child Browse files
Show More
@@ -1,1842 +1,1832 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
import collections
import contextlib
import functools
import os
import stat
import uuid
14 14
15 15 from .i18n import _
16 16
17 17 from hgdemandimport import tracing
18 18
19 19 from . import (
20 20 dirstatemap,
21 21 encoding,
22 22 error,
23 23 match as matchmod,
24 24 node,
25 25 pathutil,
26 26 policy,
27 27 pycompat,
28 28 scmutil,
29 29 txnutil,
30 30 util,
31 31 )
32 32
33 33 from .dirstateutils import (
34 34 timestamp,
35 35 )
36 36
37 37 from .interfaces import (
38 38 dirstate as intdirstate,
39 39 util as interfaceutil,
40 40 )
41 41
# platform-specific fast implementations (C parsers / Rust dirstate), or
# None/pure-python fallbacks when unavailable
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# use to detect lack of a parameter
SENTINEL = object()

HAS_FAST_DIRSTATE_V2 = rustmod is not None

# local aliases for frequently used helpers
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = dirstatemap.DirstateItem
56 56
class repocache(filecache):
    """A `filecache` whose entries live inside the `.hg/` directory."""

    def join(self, obj, fname):
        # resolve the name through the repository's vfs opener
        joined = obj._opener.join(fname)
        return joined
63 63
class rootcache(filecache):
    """A `filecache` whose entries live in the repository root."""

    def join(self, obj, fname):
        # anchor the name at the working-directory root
        joined = obj._join(fname)
        return joined
70 70
def check_invalidated(func):
    """Decorator checking that `func` runs on a non-invalidated dirstate.

    The dirstate is in an "invalidated state" after an error occured during its
    modification and remains so until we exited the top level scope that framed
    such change.

    Raises `error.ProgrammingError` when called while invalidated.
    """

    # functools.wraps preserves the wrapped function's __name__/__doc__ so
    # tracebacks, profilers and introspection report the real method instead
    # of an anonymous `wrap`.
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if self._invalidated_context:
            msg = 'calling `%s` after the dirstate was invalidated'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
87 87
88 88
def requires_changing_parents(func):
    """Decorator: `func` may only run inside a `changing_parents` context."""

    def inner(self, *args, **kwargs):
        if self.is_changing_parents:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a changing_parents context' % func.__name__
        raise error.ProgrammingError(msg)

    return check_invalidated(inner)
98 98
99 99
def requires_changing_files(func):
    """Decorator: `func` may only run inside a `changing_files` context."""

    def inner(self, *args, **kwargs):
        if self.is_changing_files:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a `changing_files`' % func.__name__
        raise error.ProgrammingError(msg)

    return check_invalidated(inner)
109 109
110 110
def requires_changing_any(func):
    """Decorator: `func` requires some `changing_*` context to be open."""

    def inner(self, *args, **kwargs):
        if self.is_changing_any:
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a changing context' % func.__name__
        raise error.ProgrammingError(msg)

    return check_invalidated(inner)
120 120
121 121
def requires_changing_files_or_status(func):
    """Decorator: `func` needs a `changing_files` or `running_status` scope."""

    def inner(self, *args, **kwargs):
        if self.is_changing_files or self._running_status > 0:
            return func(self, *args, **kwargs)
        msg = (
            'calling `%s` outside of a changing_files or running_status context'
            % func.__name__
        )
        raise error.ProgrammingError(msg)

    return check_invalidated(inner)
134 134
135 135
# identifiers for the two mutually exclusive kinds of dirstate change scope
# (see `_changing`); only one kind may be open at a time
CHANGE_TYPE_PARENTS = "parents"
CHANGE_TYPE_FILES = "files"
139 139
@interfaceutil.implementer(intdirstate.idirstate)
class dirstate:
    """Track the state of the working directory files.

    Implements the `idirstate` interface declared in
    `mercurial.interfaces.dirstate`.
    """

    # used by largefile to avoid overwriting transaction callback
    _tr_key_suffix = b''
145 145
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
        use_tracked_hint=False,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.

        validate is applied to parent nodes before returning them (see
        `parents`/`p1`/`p2`); sparsematchfn builds the sparse matcher or is
        None when sparse is disabled.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._use_tracked_hint = use_tracked_hint
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        # Either build a sparse-matcher or None if sparse is disabled
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True if any internal state may be different
        self._dirty = False
        # True if the set of tracked file may be different
        self._dirty_tracked_set = False
        self._ui = ui
        # per-attribute file stat cache used by repocache/rootcache
        self._filecache = {}
        # nesting level of `changing_parents` context
        self._changing_level = 0
        # the change currently underway
        self._change_type = None
        # number of open _running_status context
        self._running_status = 0
        # True if the current dirstate changing operations have been
        # invalidated (used to make sure all nested contexts have been exited)
        self._invalidated_context = False
        # True while our write is registered as a transaction file generator
        self._attached_to_a_transaction = False
        # on-disk file names under .hg/
        self._filename = b'dirstate'
        self._filename_th = b'dirstate-tracked-hint'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        # parents as they were when the current change scope started
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
200 200
201 201 def refresh(self):
202 202 # XXX if this happens, you likely did not enter the `changing_xxx`
203 203 # using `repo.dirstate`, so a later `repo.dirstate` accesss might call
204 204 # `refresh`.
205 205 if self.is_changing_any:
206 206 msg = "refreshing the dirstate in the middle of a change"
207 207 raise error.ProgrammingError(msg)
208 208 if '_branch' in vars(self):
209 209 del self._branch
210 210 if '_map' in vars(self) and self._map.may_need_refresh():
211 211 self.invalidate()
212 212
213 213 def prefetch_parents(self):
214 214 """make sure the parents are loaded
215 215
216 216 Used to avoid a race condition.
217 217 """
218 218 self._pl
219 219
    @contextlib.contextmanager
    @check_invalidated
    def running_status(self, repo):
        """Wrap a status operation

        This context is not mutually exclusive with the `changing_*` context.
        It also does not warrant for the `wlock` to be taken.

        If the wlock is taken, this context will behave in a simple way, and
        ensure the data are scheduled for write when leaving the top level
        context.

        If the lock is not taken, it will only warrant that the data are either
        committed (written) and rolled back (invalidated) when exiting the top
        level context. The write/invalidate action must be performed by the
        wrapped code.


        The expected logic is:

        A: read the dirstate
        B: run status
           This might make the dirstate dirty by updating cache,
           especially in Rust.
        C: do more "post status fixup if relevant
        D: try to take the w-lock (this will invalidate the changes if they were raced)
        E0: if dirstate changed on disk -> discard change (done by dirstate internal)
        E1: elif lock was acquired -> write the changes
        E2: else -> discard the changes
        """
        has_lock = repo.currentwlock() is not None
        is_changing = self.is_changing_any
        tr = repo.currenttransaction()
        has_tr = tr is not None
        nested = bool(self._running_status)

        # only the outermost, standalone status run writes/invalidates
        first_and_alone = not (is_changing or has_tr or nested)

        # enforce no change happened outside of a proper context.
        if first_and_alone and self._dirty:
            has_tr = repo.currenttransaction() is not None
            if not has_tr and self._changing_level == 0 and self._dirty:
                msg = "entering a status context, but dirstate is already dirty"
                raise error.ProgrammingError(msg)

        # with the wlock and no surrounding context, we own the write
        should_write = has_lock and not (nested or is_changing)

        self._running_status += 1
        try:
            yield
        except Exception:
            self.invalidate()
            raise
        finally:
            self._running_status -= 1
            if self._invalidated_context:
                # an inner scope discarded everything: do not write
                should_write = False
                self.invalidate()

        if should_write:
            assert repo.currenttransaction() is tr
            self.write(tr)
        elif not has_lock:
            if self._dirty:
                msg = b'dirstate dirty while exiting an isolated status context'
                repo.ui.develwarn(msg)
                self.invalidate()
287 287
    @contextlib.contextmanager
    @check_invalidated
    def _changing(self, repo, change_type):
        """Scope a dirstate change of kind `change_type` (reentrant).

        Writes on successful exit of the outermost level; invalidates on
        exception. Requires the wlock to be held.
        """
        if repo.currentwlock() is None:
            msg = b"trying to change the dirstate without holding the wlock"
            raise error.ProgrammingError(msg)

        has_tr = repo.currenttransaction() is not None
        if not has_tr and self._changing_level == 0 and self._dirty:
            msg = b"entering a changing context, but dirstate is already dirty"
            repo.ui.develwarn(msg)

        assert self._changing_level >= 0
        # different type of change are mutually exclusive
        if self._change_type is None:
            assert self._changing_level == 0
            self._change_type = change_type
        elif self._change_type != change_type:
            msg = (
                'trying to open "%s" dirstate-changing context while a "%s" is'
                ' already open'
            )
            msg %= (change_type, self._change_type)
            raise error.ProgrammingError(msg)
        should_write = False
        self._changing_level += 1
        try:
            yield
        except:  # re-raises
            self.invalidate()  # this will set `_invalidated_context`
            raise
        finally:
            assert self._changing_level > 0
            self._changing_level -= 1
            # If the dirstate is being invalidated, call invalidate again.
            # This will throw away anything added by a upper context and
            # reset the `_invalidated_context` flag when relevant
            if self._changing_level <= 0:
                self._change_type = None
                assert self._changing_level == 0
            if self._invalidated_context:
                # make sure we invalidate anything an upper context might
                # have changed.
                self.invalidate()
            else:
                should_write = self._changing_level <= 0
        tr = repo.currenttransaction()
        if has_tr != (tr is not None):
            if has_tr:
                m = "transaction vanished while changing dirstate"
            else:
                m = "transaction appeared while changing dirstate"
            raise error.ProgrammingError(m)
        if should_write:
            self.write(tr)
343 343
344 344 @contextlib.contextmanager
345 345 def changing_parents(self, repo):
346 346 """Wrap a dirstate change related to a change of working copy parents
347 347
348 348 This context scopes a series of dirstate modifications that match an
349 349 update of the working copy parents (typically `hg update`, `hg merge`
350 350 etc).
351 351
352 352 The dirstate's methods that perform this kind of modifications require
353 353 this context to be present before being called.
354 354 Such methods are decorated with `@requires_changing_parents`.
355 355
356 356 The new dirstate contents will be written to disk when the top-most
357 357 `changing_parents` context exits successfully. If an exception is
358 358 raised during a `changing_parents` context of any level, all changes
359 359 are invalidated. If this context is open within an open transaction,
360 360 the dirstate writing is delayed until that transaction is successfully
361 361 committed (and the dirstate is invalidated on transaction abort).
362 362
363 363 The `changing_parents` operation is mutually exclusive with the
364 364 `changing_files` one.
365 365 """
366 366 with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
367 367 yield c
368 368
369 369 @contextlib.contextmanager
370 370 def changing_files(self, repo):
371 371 """Wrap a dirstate change related to the set of tracked files
372 372
373 373 This context scopes a series of dirstate modifications that change the
374 374 set of tracked files. (typically `hg add`, `hg remove` etc) or some
375 375 dirstate stored information (like `hg rename --after`) but preserve
376 376 the working copy parents.
377 377
378 378 The dirstate's methods that perform this kind of modifications require
379 379 this context to be present before being called.
380 380 Such methods are decorated with `@requires_changing_files`.
381 381
382 382 The new dirstate contents will be written to disk when the top-most
383 383 `changing_files` context exits successfully. If an exception is raised
384 384 during a `changing_files` context of any level, all changes are
385 385 invalidated. If this context is open within an open transaction, the
386 386 dirstate writing is delayed until that transaction is successfully
387 387 committed (and the dirstate is invalidated on transaction abort).
388 388
389 389 The `changing_files` operation is mutually exclusive with the
390 390 `changing_parents` one.
391 391 """
392 392 with self._changing(repo, CHANGE_TYPE_FILES) as c:
393 393 yield c
394 394
395 395 # here to help migration to the new code
396 396 def parentchange(self):
397 397 msg = (
398 398 "Mercurial 6.4 and later requires call to "
399 399 "`dirstate.changing_parents(repo)`"
400 400 )
401 401 raise error.ProgrammingError(msg)
402 402
403 403 @property
404 404 def is_changing_any(self):
405 405 """Returns true if the dirstate is in the middle of a set of changes.
406 406
407 407 This returns True for any kind of change.
408 408 """
409 409 return self._changing_level > 0
410 410
411 def pendingparentchange(self):
412 return self.is_changing_parent()
413
414 def is_changing_parent(self):
415 """Returns true if the dirstate is in the middle of a set of changes
416 that modify the dirstate parent.
417 """
418 self._ui.deprecwarn(b"dirstate.is_changing_parents", b"6.5")
419 return self.is_changing_parents
420
421 411 @property
422 412 def is_changing_parents(self):
423 413 """Returns true if the dirstate is in the middle of a set of changes
424 414 that modify the dirstate parent.
425 415 """
426 416 if self._changing_level <= 0:
427 417 return False
428 418 return self._change_type == CHANGE_TYPE_PARENTS
429 419
430 420 @property
431 421 def is_changing_files(self):
432 422 """Returns true if the dirstate is in the middle of a set of changes
433 423 that modify the files tracked or their sources.
434 424 """
435 425 if self._changing_level <= 0:
436 426 return False
437 427 return self._change_type == CHANGE_TYPE_FILES
438 428
439 429 @propertycache
440 430 def _map(self):
441 431 """Return the dirstate contents (see documentation for dirstatemap)."""
442 432 return self._mapcls(
443 433 self._ui,
444 434 self._opener,
445 435 self._root,
446 436 self._nodeconstants,
447 437 self._use_dirstate_v2,
448 438 )
449 439
450 440 @property
451 441 def _sparsematcher(self):
452 442 """The matcher for the sparse checkout.
453 443
454 444 The working directory may not include every file from a manifest. The
455 445 matcher obtained by this property will match a path if it is to be
456 446 included in the working directory.
457 447
458 448 When sparse if disabled, return None.
459 449 """
460 450 if self._sparsematchfn is None:
461 451 return None
462 452 # TODO there is potential to cache this property. For now, the matcher
463 453 # is resolved on every access. (But the called function does use a
464 454 # cache to keep the lookup fast.)
465 455 return self._sparsematchfn()
466 456
467 457 @repocache(b'branch')
468 458 def _branch(self):
469 459 f = None
470 460 data = b''
471 461 try:
472 462 f, mode = txnutil.trypending(self._root, self._opener, b'branch')
473 463 data = f.read().strip()
474 464 except FileNotFoundError:
475 465 pass
476 466 finally:
477 467 if f is not None:
478 468 f.close()
479 469 if not data:
480 470 return b"default"
481 471 return data
482 472
483 473 @property
484 474 def _pl(self):
485 475 return self._map.parents()
486 476
487 477 def hasdir(self, d):
488 478 return self._map.hastrackeddir(d)
489 479
490 480 @rootcache(b'.hgignore')
491 481 def _ignore(self):
492 482 files = self._ignorefiles()
493 483 if not files:
494 484 return matchmod.never()
495 485
496 486 pats = [b'include:%s' % f for f in files]
497 487 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
498 488
499 489 @propertycache
500 490 def _slash(self):
501 491 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
502 492
503 493 @propertycache
504 494 def _checklink(self):
505 495 return util.checklink(self._root)
506 496
507 497 @propertycache
508 498 def _checkexec(self):
509 499 return bool(util.checkexec(self._root))
510 500
511 501 @propertycache
512 502 def _checkcase(self):
513 503 return not util.fscasesensitive(self._join(b'.hg'))
514 504
515 505 def _join(self, f):
516 506 # much faster than os.path.join()
517 507 # it's safe because f is always a relative path
518 508 return self._rootdir + f
519 509
    def flagfunc(self, buildfallback):
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.

        Returned flags are b'l' (symlink), b'x' (executable) or b''.
        """

        # small hack to cache the result of buildfallback()
        fallback_func = []

        def get_flags(x):
            entry = None
            fallback_value = None
            try:
                st = os.lstat(self._join(x))
            except OSError:
                # missing file: no flags
                return b''

            if self._checklink:
                # layer 1: the filesystem knows about symlinks
                if util.statislink(st):
                    return b'l'
            else:
                # layer 2: fallback flag recorded in the dirstate entry
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                else:
                    # layer 3: infer from the parents (expensive, lazily built)
                    if not fallback_func:
                        fallback_func.append(buildfallback())
                    fallback_value = fallback_func[0](x)
                    if b'l' in fallback_value:
                        return b'l'

            if self._checkexec:
                # layer 1: the filesystem knows about the exec bit
                if util.statisexec(st):
                    return b'x'
            else:
                if entry is None:
                    entry = self.get_entry(x)
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                else:
                    if fallback_value is None:
                        if not fallback_func:
                            fallback_func.append(buildfallback())
                        fallback_value = fallback_func[0](x)
                    if b'x' in fallback_value:
                        return b'x'
            return b''

        return get_flags
574 564
575 565 @propertycache
576 566 def _cwd(self):
577 567 # internal config: ui.forcecwd
578 568 forcecwd = self._ui.config(b'ui', b'forcecwd')
579 569 if forcecwd:
580 570 return forcecwd
581 571 return encoding.getcwd()
582 572
583 573 def getcwd(self):
584 574 """Return the path from which a canonical path is calculated.
585 575
586 576 This path should be used to resolve file patterns or to convert
587 577 canonical paths back to file paths for display. It shouldn't be
588 578 used to get real file paths. Use vfs functions instead.
589 579 """
590 580 cwd = self._cwd
591 581 if cwd == self._root:
592 582 return b''
593 583 # self._root ends with a path separator if self._root is '/' or 'C:\'
594 584 rootsep = self._root
595 585 if not util.endswithsep(rootsep):
596 586 rootsep += pycompat.ossep
597 587 if cwd.startswith(rootsep):
598 588 return cwd[len(rootsep) :]
599 589 else:
600 590 # we're outside the repo. return an absolute path.
601 591 return cwd
602 592
603 593 def pathto(self, f, cwd=None):
604 594 if cwd is None:
605 595 cwd = self.getcwd()
606 596 path = util.pathto(self._root, cwd, f)
607 597 if self._slash:
608 598 return util.pconvert(path)
609 599 return path
610 600
611 601 def get_entry(self, path):
612 602 """return a DirstateItem for the associated path"""
613 603 entry = self._map.get(path)
614 604 if entry is None:
615 605 return DirstateItem()
616 606 return entry
617 607
618 608 def __contains__(self, key):
619 609 return key in self._map
620 610
621 611 def __iter__(self):
622 612 return iter(sorted(self._map))
623 613
624 614 def items(self):
625 615 return self._map.items()
626 616
627 617 iteritems = items
628 618
629 619 def parents(self):
630 620 return [self._validate(p) for p in self._pl]
631 621
632 622 def p1(self):
633 623 return self._validate(self._pl[0])
634 624
635 625 def p2(self):
636 626 return self._validate(self._pl[1])
637 627
638 628 @property
639 629 def in_merge(self):
640 630 """True if a merge is in progress"""
641 631 return self._pl[1] != self._nodeconstants.nullid
642 632
643 633 def branch(self):
644 634 return encoding.tolocal(self._branch)
645 635
    @requires_changing_parents
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        # belt-and-suspenders: the decorator already enforces the context
        if self._changing_level == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.changing_parents context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        # remember the original parents so the change can be reported/undone
        if self._origpl is None:
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
672 662
    def setbranch(self, branch, transaction=SENTINEL):
        """Record `branch` (local encoding) as the current branch.

        When a transaction is given, the `.hg/branch` write is registered as
        a file generator on it; otherwise the file is written immediately.
        Passing no `transaction` argument is deprecated.
        """
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        if transaction is SENTINEL:
            msg = b"setbranch needs a `transaction` argument"
            self._ui.deprecwarn(msg, b'6.5')
            transaction = None
        if transaction is not None:
            self._setup_tr_abort(transaction)
            transaction.addfilegenerator(
                b'dirstate-3-branch%s' % self._tr_key_suffix,
                (b'branch',),
                self._write_branch,
                location=b'plain',
                post_finalize=True,
            )
            return

        vfs = self._opener
        with vfs(b'branch', b'w', atomictemp=True, checkambig=True) as f:
            self._write_branch(f)
        # make sure filecache has the correct stat info for _branch after
        # replacing the underlying file
        #
        # XXX do we actually need this,
        # refreshing the attribute is quite cheap
        ce = self._filecache[b'_branch']
        if ce:
            ce.refresh()
701 691
702 692 def _write_branch(self, file_obj):
703 693 file_obj.write(self._branch + b'\n')
704 694
705 695 def invalidate(self):
706 696 """Causes the next access to reread the dirstate.
707 697
708 698 This is different from localrepo.invalidatedirstate() because it always
709 699 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
710 700 check whether the dirstate has changed before rereading it."""
711 701
712 702 for a in ("_map", "_branch", "_ignore"):
713 703 if a in self.__dict__:
714 704 delattr(self, a)
715 705 self._dirty = False
716 706 self._dirty_tracked_set = False
717 707 self._invalidated_context = bool(
718 708 self._changing_level > 0
719 709 or self._attached_to_a_transaction
720 710 or self._running_status
721 711 )
722 712 self._origpl = None
723 713
724 714 @requires_changing_any
725 715 def copy(self, source, dest):
726 716 """Mark dest as a copy of source. Unmark dest if source is None."""
727 717 if source == dest:
728 718 return
729 719 self._dirty = True
730 720 if source is not None:
731 721 self._check_sparse(source)
732 722 self._map.copymap[dest] = source
733 723 else:
734 724 self._map.copymap.pop(dest, None)
735 725
736 726 def copied(self, file):
737 727 return self._map.copymap.get(file, None)
738 728
739 729 def copies(self):
740 730 return self._map.copymap
741 731
742 732 @requires_changing_files
743 733 def set_tracked(self, filename, reset_copy=False):
744 734 """a "public" method for generic code to mark a file as tracked
745 735
746 736 This function is to be called outside of "update/merge" case. For
747 737 example by a command like `hg add X`.
748 738
749 739 if reset_copy is set, any existing copy information will be dropped.
750 740
751 741 return True the file was previously untracked, False otherwise.
752 742 """
753 743 self._dirty = True
754 744 entry = self._map.get(filename)
755 745 if entry is None or not entry.tracked:
756 746 self._check_new_tracked_filename(filename)
757 747 pre_tracked = self._map.set_tracked(filename)
758 748 if reset_copy:
759 749 self._map.copymap.pop(filename, None)
760 750 if pre_tracked:
761 751 self._dirty_tracked_set = True
762 752 return pre_tracked
763 753
764 754 @requires_changing_files
765 755 def set_untracked(self, filename):
766 756 """a "public" method for generic code to mark a file as untracked
767 757
768 758 This function is to be called outside of "update/merge" case. For
769 759 example by a command like `hg remove X`.
770 760
771 761 return True the file was previously tracked, False otherwise.
772 762 """
773 763 ret = self._map.set_untracked(filename)
774 764 if ret:
775 765 self._dirty = True
776 766 self._dirty_tracked_set = True
777 767 return ret
778 768
779 769 @requires_changing_files_or_status
780 770 def set_clean(self, filename, parentfiledata):
781 771 """record that the current state of the file on disk is known to be clean"""
782 772 self._dirty = True
783 773 if not self._map[filename].tracked:
784 774 self._check_new_tracked_filename(filename)
785 775 (mode, size, mtime) = parentfiledata
786 776 self._map.set_clean(filename, mode, size, mtime)
787 777
788 778 @requires_changing_files_or_status
789 779 def set_possibly_dirty(self, filename):
790 780 """record that the current state of the file on disk is unknown"""
791 781 self._dirty = True
792 782 self._map.set_possibly_dirty(filename)
793 783
    @requires_changing_parents
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.changing_parents(repo):` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
        )
833 823
834 824 @requires_changing_parents
835 825 def update_file(
836 826 self,
837 827 filename,
838 828 wc_tracked,
839 829 p1_tracked,
840 830 p2_info=False,
841 831 possibly_dirty=False,
842 832 parentfiledata=None,
843 833 ):
844 834 """update the information about a file in the dirstate
845 835
846 836 This is to be called when the direstates parent changes to keep track
847 837 of what is the file situation in regards to the working copy and its parent.
848 838
849 839 This function must be called within a `dirstate.changing_parents` context.
850 840
851 841 note: the API is at an early stage and we might need to adjust it
852 842 depending of what information ends up being relevant and useful to
853 843 other processing.
854 844 """
855 845 self._update_file(
856 846 filename=filename,
857 847 wc_tracked=wc_tracked,
858 848 p1_tracked=p1_tracked,
859 849 p2_info=p2_info,
860 850 possibly_dirty=possibly_dirty,
861 851 parentfiledata=parentfiledata,
862 852 )
863 853
864 854 def hacky_extension_update_file(self, *args, **kwargs):
865 855 """NEVER USE THIS, YOU DO NOT NEED IT
866 856
867 857 This function is a variant of "update_file" to be called by a small set
868 858 of extensions, it also adjust the internal state of file, but can be
869 859 called outside an `changing_parents` context.
870 860
871 861 A very small number of extension meddle with the working copy content
872 862 in a way that requires to adjust the dirstate accordingly. At the time
873 863 this command is written they are :
874 864 - keyword,
875 865 - largefile,
876 866 PLEASE DO NOT GROW THIS LIST ANY FURTHER.
877 867
878 868 This function could probably be replaced by more semantic one (like
879 869 "adjust expected size" or "always revalidate file content", etc)
880 870 however at the time where this is writen, this is too much of a detour
881 871 to be considered.
882 872 """
883 873 if not (self._changing_level > 0 or self._running_status > 0):
884 874 msg = "requires a changes context"
885 875 raise error.ProgrammingError(msg)
886 876 self._update_file(
887 877 *args,
888 878 **kwargs,
889 879 )
890 880
891 881 def _update_file(
892 882 self,
893 883 filename,
894 884 wc_tracked,
895 885 p1_tracked,
896 886 p2_info=False,
897 887 possibly_dirty=False,
898 888 parentfiledata=None,
899 889 ):
900 890
901 891 # note: I do not think we need to double check name clash here since we
902 892 # are in a update/merge case that should already have taken care of
903 893 # this. The test agrees
904 894
905 895 self._dirty = True
906 896 old_entry = self._map.get(filename)
907 897 if old_entry is None:
908 898 prev_tracked = False
909 899 else:
910 900 prev_tracked = old_entry.tracked
911 901 if prev_tracked != wc_tracked:
912 902 self._dirty_tracked_set = True
913 903
914 904 self._map.reset_state(
915 905 filename,
916 906 wc_tracked,
917 907 p1_tracked,
918 908 p2_info=p2_info,
919 909 has_meaningful_mtime=not possibly_dirty,
920 910 parentfiledata=parentfiledata,
921 911 )
922 912
    def _check_new_tracked_filename(self, filename):
        """Reject `filename` if it cannot become a tracked file.

        Aborts when the name is invalid, clashes with a tracked directory or
        a tracked file shadowing one of its parent directories, or falls
        outside the sparse checkout.
        """
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
        self._check_sparse(filename)
939 929
940 930 def _check_sparse(self, filename):
941 931 """Check that a filename is inside the sparse profile"""
942 932 sparsematch = self._sparsematcher
943 933 if sparsematch is not None and not sparsematch.always():
944 934 if not sparsematch(filename):
945 935 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
946 936 hint = _(
947 937 b'include file with `hg debugsparse --include <pattern>` or use '
948 938 b'`hg add -s <file>` to include file directory while adding'
949 939 )
950 940 raise error.Abort(msg % filename, hint=hint)
951 941
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Discover the on-disk/dirstate case of `path` and cache it.

        `normed` is the case-normalized form of `path`; `storemap` is the
        fold-map (file or directory) in which the discovered folding is
        cached for existing paths. Returns the folded path.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                # normalize the existing directory part, keep the missing
                # final component as given
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist
            storemap[normed] = folded

        return folded
977 967
978 968 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
979 969 normed = util.normcase(path)
980 970 folded = self._map.filefoldmap.get(normed, None)
981 971 if folded is None:
982 972 if isknown:
983 973 folded = path
984 974 else:
985 975 folded = self._discoverpath(
986 976 path, normed, ignoremissing, exists, self._map.filefoldmap
987 977 )
988 978 return folded
989 979
990 980 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
991 981 normed = util.normcase(path)
992 982 folded = self._map.filefoldmap.get(normed, None)
993 983 if folded is None:
994 984 folded = self._map.dirfoldmap.get(normed, None)
995 985 if folded is None:
996 986 if isknown:
997 987 folded = path
998 988 else:
999 989 # store discovered result in dirfoldmap so that future
1000 990 # normalizefile calls don't start matching directories
1001 991 folded = self._discoverpath(
1002 992 path, normed, ignoremissing, exists, self._map.dirfoldmap
1003 993 )
1004 994 return folded
1005 995
1006 996 def normalize(self, path, isknown=False, ignoremissing=False):
1007 997 """
1008 998 normalize the case of a pathname when on a casefolding filesystem
1009 999
1010 1000 isknown specifies whether the filename came from walking the
1011 1001 disk, to avoid extra filesystem access.
1012 1002
1013 1003 If ignoremissing is True, missing path are returned
1014 1004 unchanged. Otherwise, we try harder to normalize possibly
1015 1005 existing path components.
1016 1006
1017 1007 The normalized case is determined based on the following precedence:
1018 1008
1019 1009 - version of name already stored in the dirstate
1020 1010 - version of name stored on disk
1021 1011 - version provided via command arguments
1022 1012 """
1023 1013
1024 1014 if self._checkcase:
1025 1015 return self._normalize(path, isknown, ignoremissing)
1026 1016 return path
1027 1017
1028 1018 # XXX this method is barely used, as a result:
1029 1019 # - its semantic is unclear
1030 1020 # - do we really needs it ?
    @requires_changing_parents
    def clear(self):
        """drop every entry from the dirstate map and mark it dirty

        Only legal while a changing-parents context is active (enforced by
        the decorator).
        """
        self._map.clear()
        self._dirty = True
1035 1025
    @requires_changing_parents
    def rebuild(self, parent, allfiles, changedfiles=None):
        """rebuild the dirstate against manifest content `allfiles` of `parent`

        If `changedfiles` is given, only those entries are refreshed (entries
        absent from `allfiles` are dropped); otherwise the whole dirstate is
        cleared and rebuilt.
        """
        matcher = self._sparsematcher
        if matcher is not None and not matcher.always():
            # should not add non-matching files
            allfiles = [f for f in allfiles if matcher(f)]
            if changedfiles:
                changedfiles = [f for f in changedfiles if matcher(f)]

            if changedfiles is not None:
                # these files will be deleted from the dirstate when they are
                # not found to be in allfiles
                dirstatefilestoremove = {f for f in self if not matcher(f)}
                changedfiles = dirstatefilestoremove.union(changedfiles)

        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            self.clear()
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        # remember the original parents so callbacks fire on the next write
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
1088 1078
    def _setup_tr_abort(self, tr):
        """make sure we invalidate the current change on abort"""
        if tr is None:
            return

        def on_abort(tr):
            # the pending write is gone with the transaction; drop the
            # in-memory state too
            self._attached_to_a_transaction = False
            self.invalidate()

        tr.addabort(
            b'dirstate-invalidate%s' % self._tr_key_suffix,
            on_abort,
        )
1102 1092
1103 1093 def write(self, tr):
1104 1094 if not self._dirty:
1105 1095 return
1106 1096 # make sure we don't request a write of invalidated content
1107 1097 # XXX move before the dirty check once `unlock` stop calling `write`
1108 1098 assert not self._invalidated_context
1109 1099
1110 1100 write_key = self._use_tracked_hint and self._dirty_tracked_set
1111 1101 if tr:
1112 1102
1113 1103 self._setup_tr_abort(tr)
1114 1104 self._attached_to_a_transaction = True
1115 1105
1116 1106 def on_success(f):
1117 1107 self._attached_to_a_transaction = False
1118 1108 self._writedirstate(tr, f),
1119 1109
1120 1110 # delay writing in-memory changes out
1121 1111 tr.addfilegenerator(
1122 1112 b'dirstate-1-main%s' % self._tr_key_suffix,
1123 1113 (self._filename,),
1124 1114 on_success,
1125 1115 location=b'plain',
1126 1116 post_finalize=True,
1127 1117 )
1128 1118 if write_key:
1129 1119 tr.addfilegenerator(
1130 1120 b'dirstate-2-key-post%s' % self._tr_key_suffix,
1131 1121 (self._filename_th,),
1132 1122 lambda f: self._write_tracked_hint(tr, f),
1133 1123 location=b'plain',
1134 1124 post_finalize=True,
1135 1125 )
1136 1126 return
1137 1127
1138 1128 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
1139 1129 with file(self._filename) as f:
1140 1130 self._writedirstate(tr, f)
1141 1131 if write_key:
1142 1132 # we update the key-file after writing to make sure reader have a
1143 1133 # key that match the newly written content
1144 1134 with file(self._filename_th) as f:
1145 1135 self._write_tracked_hint(tr, f)
1146 1136
    def delete_tracked_hint(self):
        """remove the tracked_hint file

        To be used by format downgrades operation"""
        self._opener.unlink(self._filename_th)
        # stop maintaining the hint from now on
        self._use_tracked_hint = False
1153 1143
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # last registration for a category wins
        self._plchangecallbacks[category] = callback
1164 1154
    def _writedirstate(self, tr, st):
        """serialize the dirstate map to the open file object `st`

        Fires the registered parent-change callbacks first if the working
        directory parents changed since the last write, then clears the
        dirty flags.
        """
        # make sure we don't write invalidated content
        assert not self._invalidated_context
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            # sort for a deterministic callback order across runs
            for c, callback in sorted(self._plchangecallbacks.items()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        self._map.write(tr, st)
        self._dirty = False
        self._dirty_tracked_set = False
1176 1166
    def _write_tracked_hint(self, tr, f):
        """write a fresh random key to the tracked-hint file object `f`

        Readers compare this key to cheaply detect that the set of tracked
        files may have changed. `tr` is unused but required by the
        file-generator callback signature.
        """
        key = node.hex(uuid.uuid4().bytes)
        f.write(b"1\n%s\n" % key)  # 1 is the format version
1180 1170
1181 1171 def _dirignore(self, f):
1182 1172 if self._ignore(f):
1183 1173 return True
1184 1174 for p in pathutil.finddirs(f):
1185 1175 if self._ignore(p):
1186 1176 return True
1187 1177 return False
1188 1178
1189 1179 def _ignorefiles(self):
1190 1180 files = []
1191 1181 if os.path.exists(self._join(b'.hgignore')):
1192 1182 files.append(self._join(b'.hgignore'))
1193 1183 for name, path in self._ui.configitems(b"ui"):
1194 1184 if name == b'ignore' or name.startswith(b'ignore.'):
1195 1185 # we need to use os.path.join here rather than self._join
1196 1186 # because path is arbitrary and user-specified
1197 1187 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1198 1188 return files
1199 1189
    def _ignorefileandline(self, f):
        """find which ignore file/line matches `f`

        Walks every ignore file (following `subinclude` directives exactly
        once each) and returns a tuple ``(ignorefile, lineno, line)`` for the
        first pattern matching `f`, or ``(None, -1, b"")`` when nothing
        matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced file once; don't match on the
                    # directive itself
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
1221 1211
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for a non-file, non-symlink entry
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # hoist frequently used lookups to locals for the loops below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; both lists are
        # sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except (OSError) as inst:
                # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in results.items():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.items():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1357 1347
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # pick the ignore predicates matching the requested listing mode
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        if self._sparsematchfn is not None:
            # restrict the walk to the sparse profile plus explicit files
            em = matchmod.exact(match.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            match = matchmod.intersectmatchers(match, sm)

        # hoist hot lookups to locals for the traversal loops below
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except (PermissionError, FileNotFoundError) as inst:
                    match.bad(
                        self.pathto(nd), encoding.strtolocal(inst.strerror)
                    )
                    continue
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # the sentinels inserted in step 1 must not leak into the result
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1548 1538
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """compute status through the Rust fast path

        Returns ``(lookup, status)`` like the pure-Python implementation.
        May raise ``rustmod.FallbackError`` (handled by the caller) when the
        Rust side cannot handle the request.
        """
        if self._sparsematchfn is not None:
            # restrict to the sparse profile plus explicitly named files
            em = matchmod.exact(matcher.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            matcher = matchmod.intersectmatchers(matcher, sm)
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust walk may fix stale entries as a side effect
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file, syntax) pair: a bad pattern inside a readable file
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: the ignore file itself was unreadable
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for fn, message in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1630 1620
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        if not self._running_status:
            msg = "Calling `status` outside a `running_status` context"
            raise error.ProgrammingError(msg)
        # the boolean parameters are rebound below as result lists, so keep
        # the flags under list* names first
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.differencematcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
            matchmod.intersectionmatcher,
            matchmod.nevermatcher,
            matchmod.unionmatcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        # Get the time from the filesystem so we can disambiguate files that
        # appear modified in the present or future.
        try:
            mtime_boundary = timestamp.get_fs_now(self._opener)
        except OSError:
            # In largefiles or readonly context
            mtime_boundary = None

        if use_rust:
            try:
                res = self._rust_status(
                    match, listclean, listignored, listunknown
                )
                return res + (mtime_boundary,)
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        # bound methods hoisted to locals for the hot loop below
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        checklink = self._checklink
        copymap = self._map.copymap

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(
            match, subrepos, listunknown, listignored, full=full
        ).items():
            if not dcontains(fn):
                # file on disk but not in the dirstate: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            t = dget(fn)
            mode = t.mode
            size = t.size

            if not st and t.tracked:
                dadd(fn)
            elif t.p2_info:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if not checklink and t.has_fallback_symlink:
                    # If the file system does not support symlink, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif not checkexec and t.has_fallback_exec:
                    # If the file system does not support exec bits, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                    # There might be a change in the future if for example the
                    # internal clock is off, but this is a case where the issues
                    # the user would face would be a lot worse and there is
                    # nothing we can really do.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status, mtime_boundary)
1781 1771
1782 1772 def matches(self, match):
1783 1773 """
1784 1774 return files in the dirstate (in whatever state) filtered by match
1785 1775 """
1786 1776 dmap = self._map
1787 1777 if rustmod is not None:
1788 1778 dmap = self._map._map
1789 1779
1790 1780 if match.always():
1791 1781 return dmap.keys()
1792 1782 files = match.files()
1793 1783 if match.isexact():
1794 1784 # fast path -- filter the other way around, since typically files is
1795 1785 # much smaller than dmap
1796 1786 return [f for f in files if f in dmap]
1797 1787 if match.prefix() and all(fn in dmap for fn in files):
1798 1788 # fast path -- all the values are known to be files, so just return
1799 1789 # that
1800 1790 return list(files)
1801 1791 return [f for f in dmap if match(f)]
1802 1792
1803 1793 def all_file_names(self):
1804 1794 """list all filename currently used by this dirstate
1805 1795
1806 1796 This is only used to do `hg rollback` related backup in the transaction
1807 1797 """
1808 1798 files = [b'branch']
1809 1799 if self._opener.exists(self._filename):
1810 1800 files.append(self._filename)
1811 1801 if self._use_dirstate_v2:
1812 1802 files.append(self._map.docket.data_filename())
1813 1803 return tuple(files)
1814 1804
    def verify(self, m1, m2, p1, narrow_matcher=None):
        """
        check the dirstate contents against the parent manifest and yield errors
        """
        missing_from_p1 = _(
            b"%s marked as tracked in p1 (%s) but not in manifest1\n"
        )
        unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
        missing_from_ps = _(
            b"%s marked as modified, but not in either manifest\n"
        )
        missing_from_ds = _(
            b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
        )
        # first pass: every dirstate entry must be consistent with m1/m2
        for f, entry in self.items():
            if entry.p1_tracked:
                if entry.modified and f not in m1 and f not in m2:
                    yield missing_from_ps % f
                elif f not in m1:
                    yield missing_from_p1 % (f, node.short(p1))
            if entry.added and f in m1:
                yield unexpected_in_p1 % f
        # second pass: every m1 file must be tracked in the dirstate
        for f in m1:
            if narrow_matcher is not None and not narrow_matcher(f):
                continue
            entry = self.get_entry(f)
            if not entry.p1_tracked:
                yield missing_from_ds % (f, node.short(p1))
General Comments 0
You need to be logged in to leave comments. Login now