##// END OF EJS Templates
dirstate: remove the interface decorator to help pytype...
Matt Harbison -
r52701:c1d7ac70 default
parent child Browse files
Show More
@@ -1,1809 +1,1811 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
import collections
import contextlib
import functools
import os
import stat
import uuid
14 14
15 15 from .i18n import _
16 16
17 17 from hgdemandimport import tracing
18 18
19 19 from . import (
20 20 dirstatemap,
21 21 encoding,
22 22 error,
23 23 match as matchmod,
24 24 node,
25 25 pathutil,
26 26 policy,
27 27 pycompat,
28 28 scmutil,
29 29 txnutil,
30 30 util,
31 31 )
32 32
33 33 from .dirstateutils import (
34 34 timestamp,
35 35 )
36 36
37 37 from .interfaces import (
38 38 dirstate as intdirstate,
39 39 util as interfaceutil,
40 40 )
41 41
# load the policy-selected implementations (C / Rust / pure Python)
parsers = policy.importmod('parsers')
# `None` when the Rust extensions are not available
rustmod = policy.importrust('dirstate')

HAS_FAST_DIRSTATE_V2 = rustmod is not None

# frequently used aliases from sibling modules
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = dirstatemap.DirstateItem
52 52
53 53
class repocache(filecache):
    """A `filecache` specialized for files that live under `.hg/`."""

    def join(self, obj, fname):
        # resolve the name through the repository's metadata opener
        opener = obj._opener
        return opener.join(fname)
60 60
class rootcache(filecache):
    """A `filecache` specialized for files that live in the repository root."""

    def join(self, obj, fname):
        # resolve the name relative to the working-directory root
        return obj._join(fname)
66 66
67 67
def check_invalidated(func):
    """Decorator: forbid calling `func` on an invalidated dirstate.

    The dirstate is in an "invalidated state" after an error occurred during
    its modification and remains so until we exit the top level scope that
    framed such change.

    Fix: use `functools.wraps` so the wrapper preserves the wrapped
    function's `__name__`/`__doc__` for introspection and error messages
    produced by outer decorators.
    """

    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if self._invalidated_context:
            msg = 'calling `%s` after the dirstate was invalidated'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
84 84
85 85
def requires_changing_parents(func):
    """Decorator: require an active `changing_parents` context.

    Also enforces (via `check_invalidated`) that the dirstate has not been
    invalidated.  `functools.wraps` keeps the wrapped function's metadata.
    """

    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if not self.is_changing_parents:
            msg = 'calling `%s` outside of a changing_parents context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return check_invalidated(wrap)
95 95
96 96
def requires_changing_files(func):
    """Decorator: require an active `changing_files` context.

    Also enforces (via `check_invalidated`) that the dirstate has not been
    invalidated.  `functools.wraps` keeps the wrapped function's metadata.
    """

    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if not self.is_changing_files:
            msg = 'calling `%s` outside of a `changing_files`'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return check_invalidated(wrap)
106 106
107 107
def requires_changing_any(func):
    """Decorator: require any kind of active `changing_*` context.

    Also enforces (via `check_invalidated`) that the dirstate has not been
    invalidated.  `functools.wraps` keeps the wrapped function's metadata.
    """

    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if not self.is_changing_any:
            msg = 'calling `%s` outside of a changing context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return check_invalidated(wrap)
117 117
118 118
def requires_changing_files_or_status(func):
    """Decorator: require a `changing_files` or `running_status` context.

    Also enforces (via `check_invalidated`) that the dirstate has not been
    invalidated.  `functools.wraps` keeps the wrapped function's metadata.
    """

    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if not (self.is_changing_files or self._running_status > 0):
            msg = (
                'calling `%s` outside of a changing_files '
                'or running_status context'
            )
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return check_invalidated(wrap)
131 131
132 132
# the two mutually exclusive kinds of dirstate change (see `_changing`)
CHANGE_TYPE_PARENTS = "parents"
CHANGE_TYPE_FILES = "files"
135 135
136 136
137 @interfaceutil.implementer(intdirstate.idirstate)
138 class dirstate:
137 class DirState:
139 138 # used by largefile to avoid overwritting transaction callback
140 139 _tr_key_suffix = b''
141 140
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
        use_tracked_hint=False,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._use_tracked_hint = use_tracked_hint
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        # Either build a sparse-matcher or None if sparse is disabled
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True if any internal state may be different (i.e. needs writing)
        self._dirty = False
        # True if the set of tracked files may be different
        self._dirty_tracked_set = False
        self._ui = ui
        self._filecache = {}
        # nesting level of `changing_parents` / `changing_files` contexts
        self._changing_level = 0
        # the change currently underway (one of the CHANGE_TYPE_* constants,
        # or None outside of any changing context)
        self._change_type = None
        # number of open `running_status` contexts
        self._running_status = 0
        # True if the current dirstate changing operations have been
        # invalidated (used to make sure all nested contexts have been exited)
        self._invalidated_context = False
        self._attached_to_a_transaction = False
        self._filename = b'dirstate'
        self._filename_th = b'dirstate-tracked-hint'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
196 195
197 196 def refresh(self):
198 197 # XXX if this happens, you likely did not enter the `changing_xxx`
199 198 # using `repo.dirstate`, so a later `repo.dirstate` accesss might call
200 199 # `refresh`.
201 200 if self.is_changing_any:
202 201 msg = "refreshing the dirstate in the middle of a change"
203 202 raise error.ProgrammingError(msg)
204 203 if '_branch' in vars(self):
205 204 del self._branch
206 205 if '_map' in vars(self) and self._map.may_need_refresh():
207 206 self.invalidate()
208 207
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching the property is enough to populate the cache
        self._pl
215 214
    @contextlib.contextmanager
    @check_invalidated
    def running_status(self, repo):
        """Wrap a status operation

        This context is not mutually exclusive with the `changing_*` context.
        It also does not warrant for the `wlock` to be taken.

        If the wlock is taken, this context will behave in a simple way, and
        ensure the data are scheduled for write when leaving the top level
        context.

        If the lock is not taken, it will only warrant that the data are either
        committed (written) and rolled back (invalidated) when exiting the top
        level context. The write/invalidate action must be performed by the
        wrapped code.


        The expected logic is:

        A: read the dirstate
        B: run status
           This might make the dirstate dirty by updating cache,
           especially in Rust.
        C: do more "post status" fixup if relevant
        D: try to take the w-lock (this will invalidate the changes if they were raced)
        E0: if dirstate changed on disk → discard change (done by dirstate internal)
        E1: elif lock was acquired → write the changes
        E2: else → discard the changes
        """
        has_lock = repo.currentwlock() is not None
        is_changing = self.is_changing_any
        tr = repo.currenttransaction()
        has_tr = tr is not None
        nested = bool(self._running_status)

        first_and_alone = not (is_changing or has_tr or nested)

        # enforce no change happened outside of a proper context.
        if first_and_alone and self._dirty:
            has_tr = repo.currenttransaction() is not None
            if not has_tr and self._changing_level == 0 and self._dirty:
                msg = "entering a status context, but dirstate is already dirty"
                raise error.ProgrammingError(msg)

        # only the outermost, lock-holding, non-changing context writes
        should_write = has_lock and not (nested or is_changing)

        self._running_status += 1
        try:
            yield
        except Exception:
            self.invalidate()
            raise
        finally:
            self._running_status -= 1
            if self._invalidated_context:
                # a nested scope was invalidated: do not write stale data
                should_write = False
                self.invalidate()

        if should_write:
            assert repo.currenttransaction() is tr
            self.write(tr)
        elif not has_lock:
            if self._dirty:
                msg = b'dirstate dirty while exiting an isolated status context'
                repo.ui.develwarn(msg)
                self.invalidate()
283 282
    @contextlib.contextmanager
    @check_invalidated
    def _changing(self, repo, change_type):
        # Generic guard shared by `changing_parents` and `changing_files`:
        # handles nesting, mutual exclusion between change types, and the
        # final write (or invalidation) when the outermost context exits.
        if repo.currentwlock() is None:
            msg = b"trying to change the dirstate without holding the wlock"
            raise error.ProgrammingError(msg)

        has_tr = repo.currenttransaction() is not None
        if not has_tr and self._changing_level == 0 and self._dirty:
            msg = b"entering a changing context, but dirstate is already dirty"
            repo.ui.develwarn(msg)

        assert self._changing_level >= 0
        # different types of change are mutually exclusive
        if self._change_type is None:
            assert self._changing_level == 0
            self._change_type = change_type
        elif self._change_type != change_type:
            msg = (
                'trying to open "%s" dirstate-changing context while a "%s" is'
                ' already open'
            )
            msg %= (change_type, self._change_type)
            raise error.ProgrammingError(msg)
        should_write = False
        self._changing_level += 1
        try:
            yield
        except:  # re-raises
            self.invalidate()  # this will set `_invalidated_context`
            raise
        finally:
            assert self._changing_level > 0
            self._changing_level -= 1
            # If the dirstate is being invalidated, call invalidate again.
            # This will throw away anything added by an upper context and
            # reset the `_invalidated_context` flag when relevant
            if self._changing_level <= 0:
                self._change_type = None
                assert self._changing_level == 0
            if self._invalidated_context:
                # make sure we invalidate anything an upper context might
                # have changed.
                self.invalidate()
            else:
                should_write = self._changing_level <= 0
                tr = repo.currenttransaction()
                if has_tr != (tr is not None):
                    # the transaction must not appear or vanish mid-change
                    if has_tr:
                        m = "transaction vanished while changing dirstate"
                    else:
                        m = "transaction appeared while changing dirstate"
                    raise error.ProgrammingError(m)
                if should_write:
                    self.write(tr)
339 338
340 339 @contextlib.contextmanager
341 340 def changing_parents(self, repo):
342 341 """Wrap a dirstate change related to a change of working copy parents
343 342
344 343 This context scopes a series of dirstate modifications that match an
345 344 update of the working copy parents (typically `hg update`, `hg merge`
346 345 etc).
347 346
348 347 The dirstate's methods that perform this kind of modifications require
349 348 this context to be present before being called.
350 349 Such methods are decorated with `@requires_changing_parents`.
351 350
352 351 The new dirstate contents will be written to disk when the top-most
353 352 `changing_parents` context exits successfully. If an exception is
354 353 raised during a `changing_parents` context of any level, all changes
355 354 are invalidated. If this context is open within an open transaction,
356 355 the dirstate writing is delayed until that transaction is successfully
357 356 committed (and the dirstate is invalidated on transaction abort).
358 357
359 358 The `changing_parents` operation is mutually exclusive with the
360 359 `changing_files` one.
361 360 """
362 361 with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
363 362 yield c
364 363
365 364 @contextlib.contextmanager
366 365 def changing_files(self, repo):
367 366 """Wrap a dirstate change related to the set of tracked files
368 367
369 368 This context scopes a series of dirstate modifications that change the
370 369 set of tracked files. (typically `hg add`, `hg remove` etc) or some
371 370 dirstate stored information (like `hg rename --after`) but preserve
372 371 the working copy parents.
373 372
374 373 The dirstate's methods that perform this kind of modifications require
375 374 this context to be present before being called.
376 375 Such methods are decorated with `@requires_changing_files`.
377 376
378 377 The new dirstate contents will be written to disk when the top-most
379 378 `changing_files` context exits successfully. If an exception is raised
380 379 during a `changing_files` context of any level, all changes are
381 380 invalidated. If this context is open within an open transaction, the
382 381 dirstate writing is delayed until that transaction is successfully
383 382 committed (and the dirstate is invalidated on transaction abort).
384 383
385 384 The `changing_files` operation is mutually exclusive with the
386 385 `changing_parents` one.
387 386 """
388 387 with self._changing(repo, CHANGE_TYPE_FILES) as c:
389 388 yield c
390 389
391 390 # here to help migration to the new code
392 391 def parentchange(self):
393 392 msg = (
394 393 "Mercurial 6.4 and later requires call to "
395 394 "`dirstate.changing_parents(repo)`"
396 395 )
397 396 raise error.ProgrammingError(msg)
398 397
399 398 @property
400 399 def is_changing_any(self):
401 400 """Returns true if the dirstate is in the middle of a set of changes.
402 401
403 402 This returns True for any kind of change.
404 403 """
405 404 return self._changing_level > 0
406 405
407 406 @property
408 407 def is_changing_parents(self):
409 408 """Returns true if the dirstate is in the middle of a set of changes
410 409 that modify the dirstate parent.
411 410 """
412 411 if self._changing_level <= 0:
413 412 return False
414 413 return self._change_type == CHANGE_TYPE_PARENTS
415 414
416 415 @property
417 416 def is_changing_files(self):
418 417 """Returns true if the dirstate is in the middle of a set of changes
419 418 that modify the files tracked or their sources.
420 419 """
421 420 if self._changing_level <= 0:
422 421 return False
423 422 return self._change_type == CHANGE_TYPE_FILES
424 423
425 424 @propertycache
426 425 def _map(self):
427 426 """Return the dirstate contents (see documentation for dirstatemap)."""
428 427 return self._mapcls(
429 428 self._ui,
430 429 self._opener,
431 430 self._root,
432 431 self._nodeconstants,
433 432 self._use_dirstate_v2,
434 433 )
435 434
436 435 @property
437 436 def _sparsematcher(self):
438 437 """The matcher for the sparse checkout.
439 438
440 439 The working directory may not include every file from a manifest. The
441 440 matcher obtained by this property will match a path if it is to be
442 441 included in the working directory.
443 442
444 443 When sparse if disabled, return None.
445 444 """
446 445 if self._sparsematchfn is None:
447 446 return None
448 447 # TODO there is potential to cache this property. For now, the matcher
449 448 # is resolved on every access. (But the called function does use a
450 449 # cache to keep the lookup fast.)
451 450 return self._sparsematchfn()
452 451
453 452 @repocache(b'branch')
454 453 def _branch(self):
455 454 f = None
456 455 data = b''
457 456 try:
458 457 f, mode = txnutil.trypending(self._root, self._opener, b'branch')
459 458 data = f.read().strip()
460 459 except FileNotFoundError:
461 460 pass
462 461 finally:
463 462 if f is not None:
464 463 f.close()
465 464 if not data:
466 465 return b"default"
467 466 return data
468 467
469 468 @property
470 469 def _pl(self):
471 470 return self._map.parents()
472 471
473 472 def hasdir(self, d):
474 473 return self._map.hastrackeddir(d)
475 474
476 475 @rootcache(b'.hgignore')
477 476 def _ignore(self):
478 477 files = self._ignorefiles()
479 478 if not files:
480 479 return matchmod.never()
481 480
482 481 pats = [b'include:%s' % f for f in files]
483 482 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
484 483
485 484 @propertycache
486 485 def _slash(self):
487 486 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
488 487
489 488 @propertycache
490 489 def _checklink(self):
491 490 return util.checklink(self._root)
492 491
493 492 @propertycache
494 493 def _checkexec(self):
495 494 return bool(util.checkexec(self._root))
496 495
497 496 @propertycache
498 497 def _checkcase(self):
499 498 return not util.fscasesensitive(self._join(b'.hg'))
500 499
501 500 def _join(self, f):
502 501 # much faster than os.path.join()
503 502 # it's safe because f is always a relative path
504 503 return self._rootdir + f
505 504
    def flagfunc(self, buildfallback):
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.
        """

        # small hack to cache the result of buildfallback()
        fallback_func = []

        def get_flags(x):
            entry = None
            fallback_value = None
            # missing/unreadable file: no flags
            try:
                st = os.lstat(self._join(x))
            except OSError:
                return b''

            if self._checklink:
                if util.statislink(st):
                    return b'l'
            else:
                # filesystem cannot express symlinks: try layer 2, then 3
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                else:
                    if not fallback_func:
                        fallback_func.append(buildfallback())
                    fallback_value = fallback_func[0](x)
                    if b'l' in fallback_value:
                        return b'l'

            if self._checkexec:
                if util.statisexec(st):
                    return b'x'
            else:
                # filesystem cannot express the exec bit: layer 2, then 3
                if entry is None:
                    entry = self.get_entry(x)
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                else:
                    if fallback_value is None:
                        if not fallback_func:
                            fallback_func.append(buildfallback())
                        fallback_value = fallback_func[0](x)
                    if b'x' in fallback_value:
                        return b'x'
            return b''

        return get_flags
560 559
561 560 @propertycache
562 561 def _cwd(self):
563 562 # internal config: ui.forcecwd
564 563 forcecwd = self._ui.config(b'ui', b'forcecwd')
565 564 if forcecwd:
566 565 return forcecwd
567 566 return encoding.getcwd()
568 567
569 568 def getcwd(self):
570 569 """Return the path from which a canonical path is calculated.
571 570
572 571 This path should be used to resolve file patterns or to convert
573 572 canonical paths back to file paths for display. It shouldn't be
574 573 used to get real file paths. Use vfs functions instead.
575 574 """
576 575 cwd = self._cwd
577 576 if cwd == self._root:
578 577 return b''
579 578 # self._root ends with a path separator if self._root is '/' or 'C:\'
580 579 rootsep = self._root
581 580 if not util.endswithsep(rootsep):
582 581 rootsep += pycompat.ossep
583 582 if cwd.startswith(rootsep):
584 583 return cwd[len(rootsep) :]
585 584 else:
586 585 # we're outside the repo. return an absolute path.
587 586 return cwd
588 587
589 588 def pathto(self, f, cwd=None):
590 589 if cwd is None:
591 590 cwd = self.getcwd()
592 591 path = util.pathto(self._root, cwd, f)
593 592 if self._slash:
594 593 return util.pconvert(path)
595 594 return path
596 595
597 596 def get_entry(self, path):
598 597 """return a DirstateItem for the associated path"""
599 598 entry = self._map.get(path)
600 599 if entry is None:
601 600 return DirstateItem()
602 601 return entry
603 602
604 603 def __contains__(self, key):
605 604 return key in self._map
606 605
607 606 def __iter__(self):
608 607 return iter(sorted(self._map))
609 608
610 609 def items(self):
611 610 return self._map.items()
612 611
613 612 iteritems = items
614 613
615 614 def parents(self):
616 615 return [self._validate(p) for p in self._pl]
617 616
618 617 def p1(self):
619 618 return self._validate(self._pl[0])
620 619
621 620 def p2(self):
622 621 return self._validate(self._pl[1])
623 622
624 623 @property
625 624 def in_merge(self):
626 625 """True if a merge is in progress"""
627 626 return self._pl[1] != self._nodeconstants.nullid
628 627
629 628 def branch(self):
630 629 return encoding.tolocal(self._branch)
631 630
    @requires_changing_parents
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._changing_level == 0:
            raise ValueError(
                "cannot set dirstate parent outside of "
                "dirstate.changing_parents context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for later comparison
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
658 657
    def setbranch(self, branch, transaction):
        """Record *branch* as the current branch.

        When *transaction* is not None, the `branch` file write is routed
        through a transaction file generator; otherwise it is written
        immediately (atomically).
        """
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        if transaction is not None:
            self._setup_tr_abort(transaction)
            transaction.addfilegenerator(
                b'dirstate-3-branch%s' % self._tr_key_suffix,
                (b'branch',),
                self._write_branch,
                location=b'plain',
                post_finalize=True,
            )
            return

        vfs = self._opener
        with vfs(b'branch', b'w', atomictemp=True, checkambig=True) as f:
            self._write_branch(f)
        # make sure filecache has the correct stat info for _branch after
        # replacing the underlying file
        #
        # XXX do we actually need this,
        # refreshing the attribute is quite cheap
        ce = self._filecache[b'_branch']
        if ce:
            ce.refresh()
683 682
684 683 def _write_branch(self, file_obj):
685 684 file_obj.write(self._branch + b'\n')
686 685
687 686 def invalidate(self):
688 687 """Causes the next access to reread the dirstate.
689 688
690 689 This is different from localrepo.invalidatedirstate() because it always
691 690 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
692 691 check whether the dirstate has changed before rereading it."""
693 692
694 693 for a in ("_map", "_branch", "_ignore"):
695 694 if a in self.__dict__:
696 695 delattr(self, a)
697 696 self._dirty = False
698 697 self._dirty_tracked_set = False
699 698 self._invalidated_context = bool(
700 699 self._changing_level > 0
701 700 or self._attached_to_a_transaction
702 701 or self._running_status
703 702 )
704 703 self._origpl = None
705 704
706 705 @requires_changing_any
707 706 def copy(self, source, dest):
708 707 """Mark dest as a copy of source. Unmark dest if source is None."""
709 708 if source == dest:
710 709 return
711 710 self._dirty = True
712 711 if source is not None:
713 712 self._check_sparse(source)
714 713 self._map.copymap[dest] = source
715 714 else:
716 715 self._map.copymap.pop(dest, None)
717 716
718 717 def copied(self, file):
719 718 return self._map.copymap.get(file, None)
720 719
721 720 def copies(self):
722 721 return self._map.copymap
723 722
724 723 @requires_changing_files
725 724 def set_tracked(self, filename, reset_copy=False):
726 725 """a "public" method for generic code to mark a file as tracked
727 726
728 727 This function is to be called outside of "update/merge" case. For
729 728 example by a command like `hg add X`.
730 729
731 730 if reset_copy is set, any existing copy information will be dropped.
732 731
733 732 return True the file was previously untracked, False otherwise.
734 733 """
735 734 self._dirty = True
736 735 entry = self._map.get(filename)
737 736 if entry is None or not entry.tracked:
738 737 self._check_new_tracked_filename(filename)
739 738 pre_tracked = self._map.set_tracked(filename)
740 739 if reset_copy:
741 740 self._map.copymap.pop(filename, None)
742 741 if pre_tracked:
743 742 self._dirty_tracked_set = True
744 743 return pre_tracked
745 744
746 745 @requires_changing_files
747 746 def set_untracked(self, filename):
748 747 """a "public" method for generic code to mark a file as untracked
749 748
750 749 This function is to be called outside of "update/merge" case. For
751 750 example by a command like `hg remove X`.
752 751
753 752 return True the file was previously tracked, False otherwise.
754 753 """
755 754 ret = self._map.set_untracked(filename)
756 755 if ret:
757 756 self._dirty = True
758 757 self._dirty_tracked_set = True
759 758 return ret
760 759
761 760 @requires_changing_files_or_status
762 761 def set_clean(self, filename, parentfiledata):
763 762 """record that the current state of the file on disk is known to be clean"""
764 763 self._dirty = True
765 764 if not self._map[filename].tracked:
766 765 self._check_new_tracked_filename(filename)
767 766 (mode, size, mtime) = parentfiledata
768 767 self._map.set_clean(filename, mode, size, mtime)
769 768
770 769 @requires_changing_files_or_status
771 770 def set_possibly_dirty(self, filename):
772 771 """record that the current state of the file on disk is unknown"""
773 772 self._dirty = True
774 773 self._map.set_possibly_dirty(filename)
775 774
    @requires_changing_parents
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after an history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.changing_parents(repo):` context.
        """
        if self.in_merge:
            msg = 'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
        )
815 814
816 815 @requires_changing_parents
817 816 def update_file(
818 817 self,
819 818 filename,
820 819 wc_tracked,
821 820 p1_tracked,
822 821 p2_info=False,
823 822 possibly_dirty=False,
824 823 parentfiledata=None,
825 824 ):
826 825 """update the information about a file in the dirstate
827 826
828 827 This is to be called when the direstates parent changes to keep track
829 828 of what is the file situation in regards to the working copy and its parent.
830 829
831 830 This function must be called within a `dirstate.changing_parents` context.
832 831
833 832 note: the API is at an early stage and we might need to adjust it
834 833 depending of what information ends up being relevant and useful to
835 834 other processing.
836 835 """
837 836 self._update_file(
838 837 filename=filename,
839 838 wc_tracked=wc_tracked,
840 839 p1_tracked=p1_tracked,
841 840 p2_info=p2_info,
842 841 possibly_dirty=possibly_dirty,
843 842 parentfiledata=parentfiledata,
844 843 )
845 844
846 845 def hacky_extension_update_file(self, *args, **kwargs):
847 846 """NEVER USE THIS, YOU DO NOT NEED IT
848 847
849 848 This function is a variant of "update_file" to be called by a small set
850 849 of extensions, it also adjust the internal state of file, but can be
851 850 called outside an `changing_parents` context.
852 851
853 852 A very small number of extension meddle with the working copy content
854 853 in a way that requires to adjust the dirstate accordingly. At the time
855 854 this command is written they are :
856 855 - keyword,
857 856 - largefile,
858 857 PLEASE DO NOT GROW THIS LIST ANY FURTHER.
859 858
860 859 This function could probably be replaced by more semantic one (like
861 860 "adjust expected size" or "always revalidate file content", etc)
862 861 however at the time where this is writen, this is too much of a detour
863 862 to be considered.
864 863 """
865 864 if not (self._changing_level > 0 or self._running_status > 0):
866 865 msg = "requires a changes context"
867 866 raise error.ProgrammingError(msg)
868 867 self._update_file(
869 868 *args,
870 869 **kwargs,
871 870 )
872 871
    def _update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        # Shared implementation behind `update_file` and
        # `hacky_extension_update_file`.
        #
        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        old_entry = self._map.get(filename)
        if old_entry is None:
            prev_tracked = False
        else:
            prev_tracked = old_entry.tracked
        if prev_tracked != wc_tracked:
            # the tracked-file set changed, remember to refresh the hint file
            self._dirty_tracked_set = True

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
903 902
    def _check_new_tracked_filename(self, filename):
        """Abort if *filename* cannot be newly tracked.

        Rejects invalid names, names that clash with a tracked directory,
        names shadowed by a tracked file, and names outside the sparse
        profile.
        """
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                # a tracked directory: no file can shadow below this point
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
        self._check_sparse(filename)
920 919
921 920 def _check_sparse(self, filename):
922 921 """Check that a filename is inside the sparse profile"""
923 922 sparsematch = self._sparsematcher
924 923 if sparsematch is not None and not sparsematch.always():
925 924 if not sparsematch(filename):
926 925 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
927 926 hint = _(
928 927 b'include file with `hg debugsparse --include <pattern>` or use '
929 928 b'`hg add -s <file>` to include file directory while adding'
930 929 )
931 930 raise error.Abort(msg % filename, hint=hint)
932 931
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Discover the filesystem case of ``path`` and cache it in ``storemap``.

        ``normed`` is the case-normalized form of ``path``.  ``exists`` may be
        passed in to avoid an extra lexists() call; ``None`` means "unknown".
        Only existing paths are cached, keyed by their normalized form.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # cache only for existing paths; missing ones may appear later
            # with a different case
            storemap[normed] = folded

        return folded
959 958 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
960 959 normed = util.normcase(path)
961 960 folded = self._map.filefoldmap.get(normed, None)
962 961 if folded is None:
963 962 if isknown:
964 963 folded = path
965 964 else:
966 965 folded = self._discoverpath(
967 966 path, normed, ignoremissing, exists, self._map.filefoldmap
968 967 )
969 968 return folded
970 969
971 970 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
972 971 normed = util.normcase(path)
973 972 folded = self._map.filefoldmap.get(normed, None)
974 973 if folded is None:
975 974 folded = self._map.dirfoldmap.get(normed, None)
976 975 if folded is None:
977 976 if isknown:
978 977 folded = path
979 978 else:
980 979 # store discovered result in dirfoldmap so that future
981 980 # normalizefile calls don't start matching directories
982 981 folded = self._discoverpath(
983 982 path, normed, ignoremissing, exists, self._map.dirfoldmap
984 983 )
985 984 return folded
986 985
987 986 def normalize(self, path, isknown=False, ignoremissing=False):
988 987 """
989 988 normalize the case of a pathname when on a casefolding filesystem
990 989
991 990 isknown specifies whether the filename came from walking the
992 991 disk, to avoid extra filesystem access.
993 992
994 993 If ignoremissing is True, missing path are returned
995 994 unchanged. Otherwise, we try harder to normalize possibly
996 995 existing path components.
997 996
998 997 The normalized case is determined based on the following precedence:
999 998
1000 999 - version of name already stored in the dirstate
1001 1000 - version of name stored on disk
1002 1001 - version provided via command arguments
1003 1002 """
1004 1003
1005 1004 if self._checkcase:
1006 1005 return self._normalize(path, isknown, ignoremissing)
1007 1006 return path
1008 1007
    # XXX this method is barely used, as a result:
    # - its semantic is unclear
    # - do we really needs it ?
    @requires_changing_parents
    def clear(self):
        """Drop every entry from the dirstate map and mark the state dirty."""
        self._map.clear()
        self._dirty = True
    @requires_changing_parents
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to describe the ``parent`` revision.

        ``allfiles`` is the full file list of that revision.  When
        ``changedfiles`` is given, only those files are re-examined; files in
        ``changedfiles`` but absent from ``allfiles`` are dropped.
        """
        matcher = self._sparsematcher
        if matcher is not None and not matcher.always():
            # should not add non-matching files
            allfiles = [f for f in allfiles if matcher(f)]
            if changedfiles:
                changedfiles = [f for f in changedfiles if matcher(f)]

            if changedfiles is not None:
                # these files will be deleted from the dirstate when they are
                # not found to be in allfiles
                dirstatefilestoremove = {f for f in self if not matcher(f)}
                changedfiles = dirstatefilestoremove.union(changedfiles)

        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            self.clear()
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        # remember the original parents for the parent-change callbacks fired
        # at write time
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
    def _setup_tr_abort(self, tr):
        """make sure we invalidate the current change on abort"""
        if tr is None:
            return

        def on_abort(tr):
            # detach from the transaction first so invalidate() is allowed to
            # discard the pending in-memory state
            self._attached_to_a_transaction = False
            self.invalidate()

        tr.addabort(
            b'dirstate-invalidate%s' % self._tr_key_suffix,
            on_abort,
        )
1084 1083 def write(self, tr):
1085 1084 if not self._dirty:
1086 1085 return
1087 1086 # make sure we don't request a write of invalidated content
1088 1087 # XXX move before the dirty check once `unlock` stop calling `write`
1089 1088 assert not self._invalidated_context
1090 1089
1091 1090 write_key = self._use_tracked_hint and self._dirty_tracked_set
1092 1091 if tr:
1093 1092 self._setup_tr_abort(tr)
1094 1093 self._attached_to_a_transaction = True
1095 1094
1096 1095 def on_success(f):
1097 1096 self._attached_to_a_transaction = False
1098 1097 self._writedirstate(tr, f),
1099 1098
1100 1099 # delay writing in-memory changes out
1101 1100 tr.addfilegenerator(
1102 1101 b'dirstate-1-main%s' % self._tr_key_suffix,
1103 1102 (self._filename,),
1104 1103 on_success,
1105 1104 location=b'plain',
1106 1105 post_finalize=True,
1107 1106 )
1108 1107 if write_key:
1109 1108 tr.addfilegenerator(
1110 1109 b'dirstate-2-key-post%s' % self._tr_key_suffix,
1111 1110 (self._filename_th,),
1112 1111 lambda f: self._write_tracked_hint(tr, f),
1113 1112 location=b'plain',
1114 1113 post_finalize=True,
1115 1114 )
1116 1115 return
1117 1116
1118 1117 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
1119 1118 with file(self._filename) as f:
1120 1119 self._writedirstate(tr, f)
1121 1120 if write_key:
1122 1121 # we update the key-file after writing to make sure reader have a
1123 1122 # key that match the newly written content
1124 1123 with file(self._filename_th) as f:
1125 1124 self._write_tracked_hint(tr, f)
1126 1125
    def delete_tracked_hint(self):
        """remove the tracked_hint file

        To be used by format downgrades operation"""
        self._opener.unlink(self._filename_th)
        # stop maintaining the hint key for the rest of this dirstate's life
        self._use_tracked_hint = False
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # registering twice under the same category replaces the old callback
        self._plchangecallbacks[category] = callback
    def _writedirstate(self, tr, st):
        """Serialize the dirstate map to the already-open file object ``st``.

        Fires the registered parent-change callbacks first when the working
        directory parents moved since the dirstate was loaded, then clears
        the dirty flags.
        """
        # make sure we don't write invalidated content
        assert not self._invalidated_context
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            # sorted() gives callbacks a deterministic firing order
            for c, callback in sorted(self._plchangecallbacks.items()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        self._map.write(tr, st)
        self._dirty = False
        self._dirty_tracked_set = False
1157 1156 def _write_tracked_hint(self, tr, f):
1158 1157 key = node.hex(uuid.uuid4().bytes)
1159 1158 f.write(b"1\n%s\n" % key) # 1 is the format version
1160 1159
1161 1160 def _dirignore(self, f):
1162 1161 if self._ignore(f):
1163 1162 return True
1164 1163 for p in pathutil.finddirs(f):
1165 1164 if self._ignore(p):
1166 1165 return True
1167 1166 return False
1168 1167
    def _ignorefiles(self):
        """Return the list of ignore-pattern file paths to honor.

        That is the repository ``.hgignore`` (when present) plus every
        ``ui.ignore`` / ``ui.ignore.*`` entry from the configuration.
        """
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files
    def _ignorefileandline(self, f):
        """Return (patternfile, lineno, line) for the first rule ignoring ``f``.

        Subinclude pattern files are queued and scanned breadth-first; each
        file is visited at most once.  Returns (None, -1, b"") when no rule
        matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # defer to the referenced pattern file (once)
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for a stat mode we cannot track
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently used lookups to locals for speed in the loop below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo: the subrepo owns
        # them (both lists are sorted so a single merge-style pass suffices)
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:
                # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in results.items():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.items():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        if self._sparsematchfn is not None:
            # narrow the walk to the sparse profile, but keep explicitly
            # requested files visible
            em = matchmod.exact(match.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            match = matchmod.intersectmatchers(match, sm)

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except (PermissionError, FileNotFoundError) as inst:
                    match.bad(
                        self.pathto(nd), encoding.strtolocal(inst.strerror)
                    )
                    continue
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # the sentinels inserted by _walkexplicit have done their job
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status through the Rust fast path.

        Returns the same (lookup, status) pair as the pure-Python path; the
        caller handles rustmod.FallbackError when Rust cannot service the
        request.
        """
        if self._sparsematchfn is not None:
            # narrow the matcher to the sparse profile, keeping explicitly
            # listed files visible
            em = matchmod.exact(matcher.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            matcher = matchmod.intersectmatchers(matcher, sm)
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust side may have refreshed cached data that must be persisted
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file, syntax) pair -> invalid ignore-pattern syntax
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path -> unreadable pattern file
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for fn, message in sorted(bad):
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        if not self._running_status:
            msg = "Calling `status` outside a `running_status` context"
            raise error.ProgrammingError(msg)
        listignored, listclean, listunknown = ignored, clean, unknown
        # note: from here on, `unknown`/`ignored`/`clean` are result lists,
        # no longer the boolean arguments (kept above as list* aliases)
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False

        # Get the time from the filesystem so we can disambiguate files that
        # appear modified in the present or future.
        try:
            mtime_boundary = timestamp.get_fs_now(self._opener)
        except OSError:
            # In largefiles or readonly context
            mtime_boundary = None

        if use_rust:
            try:
                res = self._rust_status(
                    match, listclean, listignored, listunknown
                )
                return res + (mtime_boundary,)
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        # bind the hot-loop operations to locals for speed
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        checklink = self._checklink
        copymap = self._map.copymap

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in self.walk(
            match, subrepos, listunknown, listignored, full=full
        ).items():
            if not dcontains(fn):
                # file on disk but not in the dirstate: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            t = dget(fn)
            mode = t.mode
            size = t.size

            if not st and t.tracked:
                dadd(fn)
            elif t.p2_info:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if not checklink and t.has_fallback_symlink:
                    # If the file system does not support symlink, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif not checkexec and t.has_fallback_exec:
                    # If the file system does not support exec bits, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                    # There might be a change in the future if for example the
                    # internal clock is off, but this is a case where the issues
                    # the user would face would be a lot worse and there is
                    # nothing we can really do.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status, mtime_boundary)
1749 1748 def matches(self, match):
1750 1749 """
1751 1750 return files in the dirstate (in whatever state) filtered by match
1752 1751 """
1753 1752 dmap = self._map
1754 1753 if rustmod is not None:
1755 1754 dmap = self._map._map
1756 1755
1757 1756 if match.always():
1758 1757 return dmap.keys()
1759 1758 files = match.files()
1760 1759 if match.isexact():
1761 1760 # fast path -- filter the other way around, since typically files is
1762 1761 # much smaller than dmap
1763 1762 return [f for f in files if f in dmap]
1764 1763 if match.prefix() and all(fn in dmap for fn in files):
1765 1764 # fast path -- all the values are known to be files, so just return
1766 1765 # that
1767 1766 return list(files)
1768 1767 return [f for f in dmap if match(f)]
1769 1768
1770 1769 def all_file_names(self):
1771 1770 """list all filename currently used by this dirstate
1772 1771
1773 1772 This is only used to do `hg rollback` related backup in the transaction
1774 1773 """
1775 1774 files = [b'branch']
1776 1775 if self._opener.exists(self._filename):
1777 1776 files.append(self._filename)
1778 1777 if self._use_dirstate_v2:
1779 1778 files.append(self._map.docket.data_filename())
1780 1779 return tuple(files)
1781 1780
    def verify(self, m1, m2, p1, narrow_matcher=None):
        """
        check the dirstate contents against the parent manifest and yield errors

        ``m1``/``m2`` are the parent manifests, ``p1`` the first parent node;
        ``narrow_matcher``, when given, restricts the manifest-side check.
        Yields one formatted message (bytes) per inconsistency found.
        """
        missing_from_p1 = _(
            b"%s marked as tracked in p1 (%s) but not in manifest1\n"
        )
        unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
        missing_from_ps = _(
            b"%s marked as modified, but not in either manifest\n"
        )
        missing_from_ds = _(
            b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
        )
        # pass 1: dirstate entries must be consistent with the manifests
        for f, entry in self.items():
            if entry.p1_tracked:
                if entry.modified and f not in m1 and f not in m2:
                    yield missing_from_ps % f
                elif f not in m1:
                    yield missing_from_p1 % (f, node.short(p1))
            if entry.added and f in m1:
                yield unexpected_in_p1 % f
        # pass 2: every manifest file must be tracked-in-p1 in the dirstate
        for f in m1:
            if narrow_matcher is not None and not narrow_matcher(f):
                continue
            entry = self.get_entry(f)
            if not entry.p1_tracked:
                yield missing_from_ds % (f, node.short(p1))
1809
1810
1811 dirstate = interfaceutil.implementer(intdirstate.idirstate)(DirState)
General Comments 0
You need to be logged in to leave comments. Login now