##// END OF EJS Templates
dirstate: drop the `_otherparent` method...
marmoute -
r48729:496a8e38 default
parent child Browse files
Show More
@@ -1,1634 +1,1619 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43 43
44 44 propertycache = util.propertycache
45 45 filecache = scmutil.filecache
46 46 _rangemask = dirstatemap.rangemask
47 47
48 48 DirstateItem = parsers.DirstateItem
49 49
50 50
class repocache(filecache):
    """filecache for files in .hg/

    Cache keys are resolved through the dirstate's opener, which is rooted
    at the repository's ``.hg`` directory.
    """

    def join(self, obj, fname):
        # obj is a dirstate instance; its opener joins relative to .hg/
        return obj._opener.join(fname)
56 56
57 57
class rootcache(filecache):
    """filecache for files in the repository root

    Cache keys are resolved relative to the working-directory root rather
    than ``.hg/``.
    """

    def join(self, obj, fname):
        # obj is a dirstate instance; _join prefixes the working-copy root
        return obj._join(fname)
63 63
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
def requires_parents_change(func):
    """Decorator enforcing that the wrapped dirstate method only runs
    inside a ``parentchange`` context.

    Raises error.ProgrammingError otherwise.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
84 84
85 85
def requires_no_parents_change(func):
    """Decorator enforcing that the wrapped dirstate method is NOT run
    inside a ``parentchange`` context.

    Raises error.ProgrammingError otherwise.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
95 95
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when in-memory state has diverged from what is on disk
        self._dirty = False
        # mtime of the most recently normal'ed file, consulted by status()
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # nesting depth of parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        # parents before the current round of changes (None when unchanged)
        self._origpl = None
        # files touched since the last write; used to drop ambiguous mtimes
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139 139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # reading the property is enough to populate the dirstate map's
        # parent cache; the value itself is discarded
        self._pl
146 146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163 163
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        # _parentwriters counts nested parentchange() contexts
        return self._parentwriters > 0
169 169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # assigning to self._map replaces this property on the instance, so
        # the map is only constructed once; the return value serves the very
        # first access
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181 181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194 194
195 195 @repocache(b'branch')
196 196 def _branch(self):
197 197 try:
198 198 return self._opener.read(b"branch").strip() or b"default"
199 199 except IOError as inst:
200 200 if inst.errno != errno.ENOENT:
201 201 raise
202 202 return b"default"
203 203
    @property
    def _pl(self):
        # (p1, p2) tuple as stored by the dirstate map
        return self._map.parents()
207 207
    def hasdir(self, d):
        # True if ``d`` is a directory containing at least one tracked file
        return self._map.hastrackeddir(d)
210 210
211 211 @rootcache(b'.hgignore')
212 212 def _ignore(self):
213 213 files = self._ignorefiles()
214 214 if not files:
215 215 return matchmod.never()
216 216
217 217 pats = [b'include:%s' % f for f in files]
218 218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219 219
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' instead of the OS
        # separator (ui.slash on e.g. Windows)
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223 223
    @propertycache
    def _checklink(self):
        # True if the filesystem under the repo root supports symlinks
        return util.checklink(self._root)
227 227
    @propertycache
    def _checkexec(self):
        # True if the filesystem under the repo root supports the exec bit
        return bool(util.checkexec(self._root))
231 231
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (path folding is then needed)
        return not util.fscasesensitive(self._join(b'.hg'))
235 235
    def _join(self, f):
        """Return the absolute path of repo-relative path ``f``."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240 240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a path to its flags: b'l', b'x' or b''.

        ``buildfallback`` lazily builds a flag source (presumably
        manifest-based) used when the filesystem cannot represent symlinks
        and/or the exec bit.
        """
        if self._checklink and self._checkexec:
            # filesystem supports both: read flags straight from disk

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks from disk, exec bit from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit from disk, symlinks from the fallback

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither supported: everything comes from the fallback
            return fallback
280 280
    @propertycache
    def _cwd(self):
        """Current working directory, honoring the ui.forcecwd override."""
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
288 288
    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # inside the repo: return the repo-relative portion
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
308 308
309 309 def pathto(self, f, cwd=None):
310 310 if cwd is None:
311 311 cwd = self.getcwd()
312 312 path = util.pathto(self._root, cwd, f)
313 313 if self._slash:
314 314 return util.pconvert(path)
315 315 return path
316 316
317 317 def __getitem__(self, key):
318 318 """Return the current state of key (a filename) in the dirstate.
319 319
320 320 States are:
321 321 n normal
322 322 m needs merging
323 323 r marked for removal
324 324 a marked for addition
325 325 ? not tracked
326 326
327 327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 328 consider migrating all user of this to going through the dirstate entry
329 329 instead.
330 330 """
331 331 entry = self._map.get(key)
332 332 if entry is not None:
333 333 return entry.state
334 334 return b'?'
335 335
    def __contains__(self, key):
        # membership == "the dirstate has an entry for this filename"
        return key in self._map
338 338
    def __iter__(self):
        # iterate filenames in sorted order for deterministic output
        return iter(sorted(self._map))
341 341
    def items(self):
        """Iterate over (filename, entry) pairs of the dirstate map."""
        return pycompat.iteritems(self._map)

    # Python 2 spelling kept as an alias for existing callers
    iteritems = items
346 346
    def directories(self):
        # delegated to the dirstate map
        return self._map.directories()
349 349
    def parents(self):
        # both parents, run through the node validation callback
        return [self._validate(p) for p in self._pl]
352 352
    def p1(self):
        # first working-copy parent (validated)
        return self._validate(self._pl[0])
355 355
    def p2(self):
        # second working-copy parent (validated); null when not merging
        return self._validate(self._pl[1])
358 358
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # a merge is signalled by a non-null second parent
        return self._pl[1] != self._nodeconstants.nullid
363 363
    def branch(self):
        # branch name converted to the local encoding for display
        return encoding.tolocal(self._branch)
366 366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries a
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for the write-time callbacks
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # leaving a merge: merge/other-parent markers no longer make
            # sense and must be downgraded
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414 414
    def setbranch(self, branch):
        """Persist ``branch`` (given in local encoding) to ``.hg/branch``."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise
430 430
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so they get recomputed on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
446 446
447 447 def copy(self, source, dest):
448 448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 449 if source == dest:
450 450 return
451 451 self._dirty = True
452 452 if source is not None:
453 453 self._map.copymap[dest] = source
454 454 self._updatedfiles.add(source)
455 455 self._updatedfiles.add(dest)
456 456 elif self._map.copymap.pop(dest, None):
457 457 self._updatedfiles.add(dest)
458 458
    def copied(self, file):
        # copy source of ``file``, or None when not recorded as a copy
        return self._map.copymap.get(file, None)
461 461
    def copies(self):
        # the raw {dest: source} copy map
        return self._map.copymap
464 464
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True the file was previously untracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            # not in the dirstate at all: plain add
            self._add(filename)
            return True
        elif not entry.tracked:
            # known but untracked (e.g. marked removed): revive it
            self._normallookup(filename)
            return True
        # XXX This is probably overkill for more case, but we need this to
        # fully replace the `normallookup` call with `set_tracked` one.
        # Consider smoothing this in the future.
        self.set_possibly_dirty(filename)
        return False
486 486
487 487 @requires_no_parents_change
488 488 def set_untracked(self, filename):
489 489 """a "public" method for generic code to mark a file as untracked
490 490
491 491 This function is to be called outside of "update/merge" case. For
492 492 example by a command like `hg remove X`.
493 493
494 494 return True the file was previously tracked, False otherwise.
495 495 """
496 496 entry = self._map.get(filename)
497 497 if entry is None:
498 498 return False
499 499 elif entry.added:
500 500 self._drop(filename)
501 501 return True
502 502 else:
503 503 self._dirty = True
504 504 self._updatedfiles.add(filename)
505 505 self._map.set_untracked(filename)
506 506 return True
507 507
508 508 @requires_no_parents_change
509 509 def set_clean(self, filename, parentfiledata=None):
510 510 """record that the current state of the file on disk is known to be clean"""
511 511 self._dirty = True
512 512 self._updatedfiles.add(filename)
513 513 if parentfiledata:
514 514 (mode, size, mtime) = parentfiledata
515 515 else:
516 516 (mode, size, mtime) = self._get_filedata(filename)
517 517 self._addpath(filename, mode=mode, size=size, mtime=mtime)
518 518 self._map.copymap.pop(filename, None)
519 519 if filename in self._map.nonnormalset:
520 520 self._map.nonnormalset.remove(filename)
521 521 if mtime > self._lastnormaltime:
522 522 # Remember the most recent modification timeslot for status(),
523 523 # to make sure we won't miss future size-preserving file content
524 524 # modifications that happen within the same timeslot.
525 525 self._lastnormaltime = mtime
526 526
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._updatedfiles.add(filename)
        # the map will force a lookup of this file on the next status()
        self._map.set_possibly_dirty(filename)
533 533
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            pass
        else:
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
595 595
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the direstates parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        self._updatedfiles.add(filename)

        # stat data is only needed when the file is clean in both the
        # working copy and p1
        need_parent_file_data = (
            not (possibly_dirty or clean_p2 or merged)
            and wc_tracked
            and p1_tracked
        )

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        if need_parent_file_data:
            if parentfiledata is None:
                parentfiledata = self._get_filedata(filename)
            mtime = parentfiledata[2]

            if mtime > self._lastnormaltime:
                # Remember the most recent modification timeslot for
                # status(), to make sure we won't miss future
                # size-preserving file content modifications that happen
                # within the same timeslot.
                self._lastnormaltime = mtime

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_tracked=p2_tracked,
            merged=merged,
            clean_p1=clean_p1,
            clean_p2=clean_p2,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
671 671
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """internal helper recording ``f`` in the dirstate map

        Validates the filename and checks for file/directory clashes before
        delegating to ``self._map.addfile``. Raises error.Abort on a clash.
        """
        entry = self._map.get(f)
        # note precedence: `added or (entry is not None and entry.removed)`
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
711 711
    def _get_filedata(self, filename):
        """Return the (mode, size, mtime) tuple of ``filename`` on disk."""
        s = os.lstat(self._join(filename))
        mode = s.st_mode
        size = s.st_size
        mtime = s[stat.ST_MTIME]
        return (mode, size, mtime)
719 719
720 720 def _normallookup(self, f):
721 721 '''Mark a file normal, but possibly dirty.'''
722 722 if self.in_merge:
723 723 # if there is a merge going on and the file was either
724 724 # "merged" or coming from other parent (-2) before
725 725 # being removed, restore that state.
726 726 entry = self._map.get(f)
727 727 if entry is not None:
728 728 # XXX this should probably be dealt with a a lower level
729 729 # (see `merged_removed` and `from_p2_removed`)
730 730 if entry.merged_removed or entry.from_p2_removed:
731 731 source = self._map.copymap.get(f)
732 if entry.merged_removed:
733 self._otherparent(f)
734 elif entry.from_p2_removed:
735 self._otherparent(f)
732 self._addpath(f, from_p2=True)
733 self._map.copymap.pop(f, None)
736 734 if source is not None:
737 735 self.copy(source, f)
738 736 return
739 737 elif entry.merged or entry.from_p2:
740 738 return
741 739 self._addpath(f, possibly_dirty=True)
742 740 self._map.copymap.pop(f, None)
743 741
744 def _otherparent(self, f):
745 if not self.in_merge:
746 msg = _(b"setting %r to other parent only allowed in merges") % f
747 raise error.Abort(msg)
748 entry = self._map.get(f)
749 if entry is not None and entry.tracked:
750 # merge-like
751 self._addpath(f, merged=True)
752 else:
753 # add-like
754 self._addpath(f, from_p2=True)
755 self._map.copymap.pop(f, None)
756
    def _add(self, filename):
        """internal function to mark a file as added"""
        self._addpath(filename, added=True)
        # an added file cannot carry a copy record yet
        self._map.copymap.pop(filename, None)
761 746
    def _drop(self, filename):
        """internal function to drop a file from the dirstate"""
        # dropfile returns True only when an entry was actually removed
        if self._map.dropfile(filename):
            self._dirty = True
            self._updatedfiles.add(filename)
            self._map.copymap.pop(filename, None)
768 753
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Resolve the on-disk case of ``path`` and cache it in ``storemap``.

        ``normed`` is the case-normalized form of ``path``; ``exists`` may be
        precomputed by the caller (None means "check the filesystem").
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded
794 779
795 780 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
796 781 normed = util.normcase(path)
797 782 folded = self._map.filefoldmap.get(normed, None)
798 783 if folded is None:
799 784 if isknown:
800 785 folded = path
801 786 else:
802 787 folded = self._discoverpath(
803 788 path, normed, ignoremissing, exists, self._map.filefoldmap
804 789 )
805 790 return folded
806 791
807 792 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
808 793 normed = util.normcase(path)
809 794 folded = self._map.filefoldmap.get(normed, None)
810 795 if folded is None:
811 796 folded = self._map.dirfoldmap.get(normed, None)
812 797 if folded is None:
813 798 if isknown:
814 799 folded = path
815 800 else:
816 801 # store discovered result in dirfoldmap so that future
817 802 # normalizefile calls don't start matching directories
818 803 folded = self._discoverpath(
819 804 path, normed, ignoremissing, exists, self._map.dirfoldmap
820 805 )
821 806 return folded
822 807
    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        # on case-sensitive filesystems there is nothing to fold
        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path
844 829
    def clear(self):
        """Empty the dirstate (map, timestamps, pending updates)."""
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        # the cleared state must be written out
        self._dirty = True
850 835
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild the dirstate for ``parent`` from ``allfiles``.

        When ``changedfiles`` is given, only those files are re-looked-up or
        dropped; otherwise the whole dirstate is reconstructed.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the rebuild
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self._normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True
884 869
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
892 877
    def write(self, tr):
        """Write the dirstate to disk, or schedule the write on ``tr``.

        No-op when nothing is dirty. With a transaction the write is delayed
        via a file generator; without one it happens immediately.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
924 909
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
        dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
935 920
    def _writedirstate(self, tr, st):
        """Serialize the dirstate into open file object ``st``.

        Fires parent-change callbacks, then handles the 'delaywrite' debug
        knob before delegating to the map's write routine.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = 0
        self._dirty = False
969 954
970 955 def _dirignore(self, f):
971 956 if self._ignore(f):
972 957 return True
973 958 for p in pathutil.finddirs(f):
974 959 if self._ignore(p):
975 960 return True
976 961 return False
977 962
978 963 def _ignorefiles(self):
979 964 files = []
980 965 if os.path.exists(self._join(b'.hgignore')):
981 966 files.append(self._join(b'.hgignore'))
982 967 for name, path in self._ui.configitems(b"ui"):
983 968 if name == b'ignore' or name.startswith(b'ignore.'):
984 969 # we need to use os.path.join here rather than self._join
985 970 # because path is arbitrary and user-specified
986 971 files.append(os.path.join(self._rootdir, util.expandpath(path)))
987 972 return files
988 973
989 974 def _ignorefileandline(self, f):
990 975 files = collections.deque(self._ignorefiles())
991 976 visited = set()
992 977 while files:
993 978 i = files.popleft()
994 979 patterns = matchmod.readpatternfile(
995 980 i, self._ui.warn, sourceinfo=True
996 981 )
997 982 for pattern, lineno, line in patterns:
998 983 kind, p = matchmod._patsplit(pattern, b'glob')
999 984 if kind == b"subinclude":
1000 985 if p not in visited:
1001 986 files.append(p)
1002 987 continue
1003 988 m = matchmod.match(
1004 989 self._root, b'', [], [pattern], warn=self._ui.warn
1005 990 )
1006 991 if m(f):
1007 992 return (i, lineno, line)
1008 993 visited.add(i)
1009 994 return (None, -1, b"")
1010 995
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable message for a path that is neither a regular
            # file, a symlink, nor a directory
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # hoist frequently used attributes into locals for the loop below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicitly-listed files that live inside a subrepo; both
        # lists are sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # seed results with sentinels so walks never escape into subrepos
        # or the .hg directory
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1145 1130
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        The walk proceeds in three steps: (1) stat the explicitly listed
        files, (2) recurse into the directories found in step 1, and
        (3) sweep whatever dirstate entries step 2 did not reach.
        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # hoist hot attribute lookups into locals for the traversal loop
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # iterative depth-first traversal: `work` doubles as the stack
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # remove the sentinels seeded by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1333 1318
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status through the Rust implementation.

        Returns the same ``(lookup, status)`` pair as :meth:`status`.
        Raises ``rustmod.FallbackError`` when the Rust side cannot handle
        the request (caller falls back to the Python path).
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust side may have updated the map in place; remember to write
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax) pair: invalid pattern syntax
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: the pattern file itself was unreadable
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1412 1397
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # prefer the Rust fast path unless some feature it lacks is needed
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        # bind the per-category appenders once; categories the caller did
        # not request collapse to a no-op
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file on disk but absent from the dirstate: unknown or
                # ignored
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1553 1538
1554 1539 def matches(self, match):
1555 1540 """
1556 1541 return files in the dirstate (in whatever state) filtered by match
1557 1542 """
1558 1543 dmap = self._map
1559 1544 if rustmod is not None:
1560 1545 dmap = self._map._rustmap
1561 1546
1562 1547 if match.always():
1563 1548 return dmap.keys()
1564 1549 files = match.files()
1565 1550 if match.isexact():
1566 1551 # fast path -- filter the other way around, since typically files is
1567 1552 # much smaller than dmap
1568 1553 return [f for f in files if f in dmap]
1569 1554 if match.prefix() and all(fn in dmap for fn in files):
1570 1555 # fast path -- all the values are known to be files, so just return
1571 1556 # that
1572 1557 return list(files)
1573 1558 return [f for f in dmap if match(f)]
1574 1559
1575 1560 def _actualfilename(self, tr):
1576 1561 if tr:
1577 1562 return self._pendingfilename
1578 1563 else:
1579 1564 return self._filename
1580 1565
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        Flushes any pending dirstate changes first so the backup reflects
        the in-memory state, then hardlinks the (possibly pending) dirstate
        file to `backupname`.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1619 1604
1620 1605 def restorebackup(self, tr, backupname):
1621 1606 '''Restore dirstate by backup file'''
1622 1607 # this "invalidate()" prevents "wlock.release()" from writing
1623 1608 # changes of dirstate out after restoring from backup file
1624 1609 self.invalidate()
1625 1610 filename = self._actualfilename(tr)
1626 1611 o = self._opener
1627 1612 if util.samefile(o.join(backupname), o.join(filename)):
1628 1613 o.unlink(backupname)
1629 1614 else:
1630 1615 o.rename(backupname, filename, checkambig=True)
1631 1616
    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        # `tr` is accepted but unused here; the signature mirrors
        # savebackup()/restorebackup()
        self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now