##// END OF EJS Templates
dirstate: drop duplicated check...
marmoute -
r48470:ec1d63e6 default
parent child Browse files
Show More
@@ -1,1630 +1,1627
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43 43
44 44 propertycache = util.propertycache
45 45 filecache = scmutil.filecache
46 46 _rangemask = dirstatemap.rangemask
47 47
48 48 DirstateItem = parsers.DirstateItem
49 49
50 50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg directory
        return obj._opener.join(fname)
56 56
57 57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
63 63
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
def requires_parents_change(func):
    """Decorator restricting *func* to `dirstate.parentchange()` contexts.

    Calling the wrapped method while no parent change is pending raises a
    ProgrammingError instead of silently touching the dirstate.
    """

    def inner(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return inner
84 84
85 85
def requires_no_parents_change(func):
    """Decorator forbidding *func* inside a `dirstate.parentchange()` context.

    Calling the wrapped method while a parent change is pending raises a
    ProgrammingError.
    """

    def inner(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return inner
95 95
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state may differ from the on-disk dirstate
        self._dirty = False
        # most recent mtime handed out by normal(); see normal() for details
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # nesting depth of parentchange() contexts (0 == no change pending)
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        # category -> callback; fired from _writedirstate when parents changed
        self._plchangecallbacks = {}
        # parents as of the last write, used to detect a parent change
        self._origpl = None
        # files touched since the last write; used to drop ambiguous mtimes
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139 139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # merely reading the property forces the parents (and the dirstate
        # map behind them) to be loaded and cached
        self._pl
146 146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.

        Contexts may nest; `_parentwriters` counts the nesting depth.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163 163
164 164 def pendingparentchange(self):
165 165 """Returns true if the dirstate is in the middle of a set of changes
166 166 that modify the dirstate parent.
167 167 """
168 168 return self._parentwriters > 0
169 169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # the explicit assignment caches the map on the instance; presumably
        # this also covers re-entrant access during construction — the
        # propertycache decorator would otherwise cache the return value
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181 181
182 182 @property
183 183 def _sparsematcher(self):
184 184 """The matcher for the sparse checkout.
185 185
186 186 The working directory may not include every file from a manifest. The
187 187 matcher obtained by this property will match a path if it is to be
188 188 included in the working directory.
189 189 """
190 190 # TODO there is potential to cache this property. For now, the matcher
191 191 # is resolved on every access. (But the called function does use a
192 192 # cache to keep the lookup fast.)
193 193 return self._sparsematchfn()
194 194
    @repocache(b'branch')
    def _branch(self):
        """Return the working copy's branch name (bytes).

        An absent or empty `.hg/branch` file means the default branch.
        """
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            # only a missing file is acceptable; propagate anything else
            if inst.errno != errno.ENOENT:
                raise
            return b"default"
203 203
204 204 @property
205 205 def _pl(self):
206 206 return self._map.parents()
207 207
208 208 def hasdir(self, d):
209 209 return self._map.hastrackeddir(d)
210 210
211 211 @rootcache(b'.hgignore')
212 212 def _ignore(self):
213 213 files = self._ignorefiles()
214 214 if not files:
215 215 return matchmod.never()
216 216
217 217 pats = [b'include:%s' % f for f in files]
218 218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219 219
220 220 @propertycache
221 221 def _slash(self):
222 222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223 223
224 224 @propertycache
225 225 def _checklink(self):
226 226 return util.checklink(self._root)
227 227
228 228 @propertycache
229 229 def _checkexec(self):
230 230 return bool(util.checkexec(self._root))
231 231
232 232 @propertycache
233 233 def _checkcase(self):
234 234 return not util.fscasesensitive(self._join(b'.hg'))
235 235
236 236 def _join(self, f):
237 237 # much faster than os.path.join()
238 238 # it's safe because f is always a relative path
239 239 return self._rootdir + f
240 240
    def flagfunc(self, buildfallback):
        """Return a function mapping a tracked path to its flags.

        The returned callable yields b'l' for a symlink, b'x' for an
        executable file and b'' otherwise.  Depending on what the
        filesystem supports (symlinks and/or the exec bit), flags are read
        from the filesystem directly or from `buildfallback()` — a callable
        producing a flag-lookup function (presumably manifest-based; the
        implementation lives with the caller).
        """
        # filesystem supports both: a single lstat answers everything
        if self._checklink and self._checkexec:

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        # symlinks only: exec bit comes from the fallback
        if self._checklink:

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        # exec bit only: symlink flag comes from the fallback
        if self._checkexec:

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # filesystem supports neither: rely entirely on the fallback
            return fallback
280 280
281 281 @propertycache
282 282 def _cwd(self):
283 283 # internal config: ui.forcecwd
284 284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 285 if forcecwd:
286 286 return forcecwd
287 287 return encoding.getcwd()
288 288
289 289 def getcwd(self):
290 290 """Return the path from which a canonical path is calculated.
291 291
292 292 This path should be used to resolve file patterns or to convert
293 293 canonical paths back to file paths for display. It shouldn't be
294 294 used to get real file paths. Use vfs functions instead.
295 295 """
296 296 cwd = self._cwd
297 297 if cwd == self._root:
298 298 return b''
299 299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 300 rootsep = self._root
301 301 if not util.endswithsep(rootsep):
302 302 rootsep += pycompat.ossep
303 303 if cwd.startswith(rootsep):
304 304 return cwd[len(rootsep) :]
305 305 else:
306 306 # we're outside the repo. return an absolute path.
307 307 return cwd
308 308
309 309 def pathto(self, f, cwd=None):
310 310 if cwd is None:
311 311 cwd = self.getcwd()
312 312 path = util.pathto(self._root, cwd, f)
313 313 if self._slash:
314 314 return util.pconvert(path)
315 315 return path
316 316
317 317 def __getitem__(self, key):
318 318 """Return the current state of key (a filename) in the dirstate.
319 319
320 320 States are:
321 321 n normal
322 322 m needs merging
323 323 r marked for removal
324 324 a marked for addition
325 325 ? not tracked
326 326
327 327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 328 consider migrating all user of this to going through the dirstate entry
329 329 instead.
330 330 """
331 331 entry = self._map.get(key)
332 332 if entry is not None:
333 333 return entry.state
334 334 return b'?'
335 335
336 336 def __contains__(self, key):
337 337 return key in self._map
338 338
339 339 def __iter__(self):
340 340 return iter(sorted(self._map))
341 341
342 342 def items(self):
343 343 return pycompat.iteritems(self._map)
344 344
345 345 iteritems = items
346 346
347 347 def directories(self):
348 348 return self._map.directories()
349 349
350 350 def parents(self):
351 351 return [self._validate(p) for p in self._pl]
352 352
353 353 def p1(self):
354 354 return self._validate(self._pl[0])
355 355
356 356 def p2(self):
357 357 return self._validate(self._pl[1])
358 358
359 359 @property
360 360 def in_merge(self):
361 361 """True if a merge is in progress"""
362 362 return self._pl[1] != self._nodeconstants.nullid
363 363
364 364 def branch(self):
365 365 return encoding.tolocal(self._branch)
366 366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        # setparents is only legal inside a parentchange() context
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        # remember the pre-change parents so _writedirstate can notify
        # registered parent-change callbacks
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        # filename -> copy source, returned to the caller
        copies = {}
        # leaving a merge (two parents -> one): clean up merge-era markers
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414 414
415 415 def setbranch(self, branch):
416 416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 418 try:
419 419 f.write(self._branch + b'\n')
420 420 f.close()
421 421
422 422 # make sure filecache has the correct stat info for _branch after
423 423 # replacing the underlying file
424 424 ce = self._filecache[b'_branch']
425 425 if ce:
426 426 ce.refresh()
427 427 except: # re-raises
428 428 f.discard()
429 429 raise
430 430
431 431 def invalidate(self):
432 432 """Causes the next access to reread the dirstate.
433 433
434 434 This is different from localrepo.invalidatedirstate() because it always
435 435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 436 check whether the dirstate has changed before rereading it."""
437 437
438 438 for a in ("_map", "_branch", "_ignore"):
439 439 if a in self.__dict__:
440 440 delattr(self, a)
441 441 self._lastnormaltime = 0
442 442 self._dirty = False
443 443 self._updatedfiles.clear()
444 444 self._parentwriters = 0
445 445 self._origpl = None
446 446
447 447 def copy(self, source, dest):
448 448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 449 if source == dest:
450 450 return
451 451 self._dirty = True
452 452 if source is not None:
453 453 self._map.copymap[dest] = source
454 454 self._updatedfiles.add(source)
455 455 self._updatedfiles.add(dest)
456 456 elif self._map.copymap.pop(dest, None):
457 457 self._updatedfiles.add(dest)
458 458
459 459 def copied(self, file):
460 460 return self._map.copymap.get(file, None)
461 461
462 462 def copies(self):
463 463 return self._map.copymap
464 464
465 465 @requires_no_parents_change
466 466 def set_tracked(self, filename):
467 467 """a "public" method for generic code to mark a file as tracked
468 468
469 469 This function is to be called outside of "update/merge" case. For
470 470 example by a command like `hg add X`.
471 471
472 472 return True the file was previously untracked, False otherwise.
473 473 """
474 474 entry = self._map.get(filename)
475 475 if entry is None:
476 476 self._add(filename)
477 477 return True
478 478 elif not entry.tracked:
479 479 self.normallookup(filename)
480 480 return True
481 481 return False
482 482
    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True the file was previously tracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            # already untracked: nothing to do
            return False
        elif entry.added:
            # never committed: forget the entry entirely rather than
            # recording a removal
            self._drop(filename)
            return True
        else:
            self._remove(filename)
            return True
501 501
    @requires_parents_change
    def update_file_reference(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            self.normallookup(filename)
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            # tracked only in the working copy: record it as added unless it
            # already is (entry cannot be None here since wc_tracked is True)
            if not entry.added:
                self._add(filename)
        elif p1_tracked and not wc_tracked:
            if entry is None or not entry.removed:
                self._remove(filename)
        else:
            assert False, 'unreachable'
539 539
540 540 @requires_parents_change
541 541 def update_file(
542 542 self,
543 543 filename,
544 544 wc_tracked,
545 545 p1_tracked,
546 546 p2_tracked=False,
547 547 merged=False,
548 548 clean_p1=False,
549 549 clean_p2=False,
550 550 possibly_dirty=False,
551 551 ):
552 552 """update the information about a file in the dirstate
553 553
554 554 This is to be called when the direstates parent changes to keep track
555 555 of what is the file situation in regards to the working copy and its parent.
556 556
557 557 This function must be called within a `dirstate.parentchange` context.
558 558
559 559 note: the API is at an early stage and we might need to ajust it
560 560 depending of what information ends up being relevant and useful to
561 561 other processing.
562 562 """
563 if not self.pendingparentchange():
564 msg = b'calling `update_file` outside of a parentchange context'
565 raise error.ProgrammingError(msg)
566 563 if merged and (clean_p1 or clean_p2):
567 564 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
568 565 raise error.ProgrammingError(msg)
569 566 assert not (merged and (clean_p1 or clean_p1))
570 567 if not (p1_tracked or p2_tracked or wc_tracked):
571 568 self._drop(filename)
572 569 elif merged:
573 570 assert wc_tracked
574 571 if not self.in_merge:
575 572 self.normallookup(filename)
576 573 self.otherparent(filename)
577 574 elif not (p1_tracked or p2_tracked) and wc_tracked:
578 575 self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
579 576 self._map.copymap.pop(filename, None)
580 577 elif (p1_tracked or p2_tracked) and not wc_tracked:
581 578 self._remove(filename)
582 579 elif clean_p2 and wc_tracked:
583 580 assert p2_tracked
584 581 self.otherparent(filename)
585 582 elif not p1_tracked and p2_tracked and wc_tracked:
586 583 self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
587 584 self._map.copymap.pop(filename, None)
588 585 elif possibly_dirty:
589 586 self._addpath(filename, possibly_dirty=possibly_dirty)
590 587 elif wc_tracked:
591 588 self.normal(filename)
592 589 # XXX We need something for file that are dirty after an update
593 590 else:
594 591 assert False, 'unreachable'
595 592
596 593 @requires_parents_change
597 594 def update_parent_file_data(self, f, filedata):
598 595 """update the information about the content of a file
599 596
600 597 This function should be called within a `dirstate.parentchange` context.
601 598 """
602 599 self.normal(f, parentfiledata=filedata)
603 600
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """internal helper recording file *f* in the dirstate map

        Validates the name (only when the file is newly tracked: added, or
        resurrecting a removed entry) against directory/file clashes, then
        forwards all state flags to `self._map.addfile`.
        """
        entry = self._map.get(f)
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            # a tracked directory with the same name would shadow the file
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
643 640
    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # no cached data: stat the file on disk right now
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        # a clean file cannot be a copy nor non-normal
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
670 667
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    # re-establish the copy record dropped by the removal
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in the desired merge-time state: nothing to do
                    return
        # record the file with unknown (possibly dirty) stat data so the next
        # status() will re-check it
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
694 691
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if not self.in_merge:
            # only meaningful while p2 != nullid
            msg = _(b"setting %r to other parent only allowed in merges") % f
            raise error.Abort(msg)
        entry = self._map.get(f)
        if entry is not None and entry.tracked:
            # merge-like
            self._addpath(f, merged=True)
        else:
            # add-like
            self._addpath(f, from_p2=True)
        self._map.copymap.pop(f, None)
708 705
709 706 def add(self, f):
710 707 '''Mark a file added.'''
711 708 if not self.pendingparentchange():
712 709 util.nouideprecwarn(
713 710 b"do not use `add` outside of update/merge context."
714 711 b" Use `set_tracked`",
715 712 b'6.0',
716 713 stacklevel=2,
717 714 )
718 715 self._add(f)
719 716
720 717 def _add(self, filename):
721 718 """internal function to mark a file as added"""
722 719 self._addpath(filename, added=True)
723 720 self._map.copymap.pop(filename, None)
724 721
725 722 def remove(self, f):
726 723 '''Mark a file removed'''
727 724 if not self.pendingparentchange():
728 725 util.nouideprecwarn(
729 726 b"do not use `remove` outside of update/merge context."
730 727 b" Use `set_untracked`",
731 728 b'6.0',
732 729 stacklevel=2,
733 730 )
734 731 self._remove(f)
735 732
736 733 def _remove(self, filename):
737 734 """internal function to mark a file removed"""
738 735 self._dirty = True
739 736 self._updatedfiles.add(filename)
740 737 self._map.removefile(filename, in_merge=self.in_merge)
741 738
742 739 def merge(self, f):
743 740 '''Mark a file merged.'''
744 741 if not self.in_merge:
745 742 return self.normallookup(f)
746 743 return self.otherparent(f)
747 744
748 745 def drop(self, f):
749 746 '''Drop a file from the dirstate'''
750 747 if not self.pendingparentchange():
751 748 util.nouideprecwarn(
752 749 b"do not use `drop` outside of update/merge context."
753 750 b" Use `set_untracked`",
754 751 b'6.0',
755 752 stacklevel=2,
756 753 )
757 754 self._drop(f)
758 755
759 756 def _drop(self, filename):
760 757 """internal function to drop a file from the dirstate"""
761 758 if self._map.dropfile(filename):
762 759 self._dirty = True
763 760 self._updatedfiles.add(filename)
764 761 self._map.copymap.pop(filename, None)
765 762
766 763 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
767 764 if exists is None:
768 765 exists = os.path.lexists(os.path.join(self._root, path))
769 766 if not exists:
770 767 # Maybe a path component exists
771 768 if not ignoremissing and b'/' in path:
772 769 d, f = path.rsplit(b'/', 1)
773 770 d = self._normalize(d, False, ignoremissing, None)
774 771 folded = d + b"/" + f
775 772 else:
776 773 # No path components, preserve original case
777 774 folded = path
778 775 else:
779 776 # recursively normalize leading directory components
780 777 # against dirstate
781 778 if b'/' in normed:
782 779 d, f = normed.rsplit(b'/', 1)
783 780 d = self._normalize(d, False, ignoremissing, True)
784 781 r = self._root + b"/" + d
785 782 folded = d + b"/" + util.fspath(f, r)
786 783 else:
787 784 folded = util.fspath(normed, self._root)
788 785 storemap[normed] = folded
789 786
790 787 return folded
791 788
792 789 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
793 790 normed = util.normcase(path)
794 791 folded = self._map.filefoldmap.get(normed, None)
795 792 if folded is None:
796 793 if isknown:
797 794 folded = path
798 795 else:
799 796 folded = self._discoverpath(
800 797 path, normed, ignoremissing, exists, self._map.filefoldmap
801 798 )
802 799 return folded
803 800
804 801 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
805 802 normed = util.normcase(path)
806 803 folded = self._map.filefoldmap.get(normed, None)
807 804 if folded is None:
808 805 folded = self._map.dirfoldmap.get(normed, None)
809 806 if folded is None:
810 807 if isknown:
811 808 folded = path
812 809 else:
813 810 # store discovered result in dirfoldmap so that future
814 811 # normalizefile calls don't start matching directories
815 812 folded = self._discoverpath(
816 813 path, normed, ignoremissing, exists, self._map.dirfoldmap
817 814 )
818 815 return folded
819 816
820 817 def normalize(self, path, isknown=False, ignoremissing=False):
821 818 """
822 819 normalize the case of a pathname when on a casefolding filesystem
823 820
824 821 isknown specifies whether the filename came from walking the
825 822 disk, to avoid extra filesystem access.
826 823
827 824 If ignoremissing is True, missing path are returned
828 825 unchanged. Otherwise, we try harder to normalize possibly
829 826 existing path components.
830 827
831 828 The normalized case is determined based on the following precedence:
832 829
833 830 - version of name already stored in the dirstate
834 831 - version of name stored on disk
835 832 - version provided via command arguments
836 833 """
837 834
838 835 if self._checkcase:
839 836 return self._normalize(path, isknown, ignoremissing)
840 837 return path
841 838
842 839 def clear(self):
843 840 self._map.clear()
844 841 self._lastnormaltime = 0
845 842 self._updatedfiles.clear()
846 843 self._dirty = True
847 844
848 845 def rebuild(self, parent, allfiles, changedfiles=None):
849 846 if changedfiles is None:
850 847 # Rebuild entire dirstate
851 848 to_lookup = allfiles
852 849 to_drop = []
853 850 lastnormaltime = self._lastnormaltime
854 851 self.clear()
855 852 self._lastnormaltime = lastnormaltime
856 853 elif len(changedfiles) < 10:
857 854 # Avoid turning allfiles into a set, which can be expensive if it's
858 855 # large.
859 856 to_lookup = []
860 857 to_drop = []
861 858 for f in changedfiles:
862 859 if f in allfiles:
863 860 to_lookup.append(f)
864 861 else:
865 862 to_drop.append(f)
866 863 else:
867 864 changedfilesset = set(changedfiles)
868 865 to_lookup = changedfilesset & set(allfiles)
869 866 to_drop = changedfilesset - to_lookup
870 867
871 868 if self._origpl is None:
872 869 self._origpl = self._pl
873 870 self._map.setparents(parent, self._nodeconstants.nullid)
874 871
875 872 for f in to_lookup:
876 873 self.normallookup(f)
877 874 for f in to_drop:
878 875 self._drop(f)
879 876
880 877 self._dirty = True
881 878
882 879 def identity(self):
883 880 """Return identity of dirstate itself to detect changing in storage
884 881
885 882 If identity of previous dirstate is equal to this, writing
886 883 changes based on the former dirstate out can keep consistency.
887 884 """
888 885 return self._map.identity
889 886
890 887 def write(self, tr):
891 888 if not self._dirty:
892 889 return
893 890
894 891 filename = self._filename
895 892 if tr:
896 893 # 'dirstate.write()' is not only for writing in-memory
897 894 # changes out, but also for dropping ambiguous timestamp.
898 895 # delayed writing re-raise "ambiguous timestamp issue".
899 896 # See also the wiki page below for detail:
900 897 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
901 898
902 899 # emulate dropping timestamp in 'parsers.pack_dirstate'
903 900 now = _getfsnow(self._opener)
904 901 self._map.clearambiguoustimes(self._updatedfiles, now)
905 902
906 903 # emulate that all 'dirstate.normal' results are written out
907 904 self._lastnormaltime = 0
908 905 self._updatedfiles.clear()
909 906
910 907 # delay writing in-memory changes out
911 908 tr.addfilegenerator(
912 909 b'dirstate',
913 910 (self._filename,),
914 911 self._writedirstate,
915 912 location=b'plain',
916 913 )
917 914 return
918 915
919 916 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
920 917 self._writedirstate(st)
921 918
922 919 def addparentchangecallback(self, category, callback):
923 920 """add a callback to be called when the wd parents are changed
924 921
925 922 Callback will be called with the following arguments:
926 923 dirstate, (oldp1, oldp2), (newp1, newp2)
927 924
928 925 Category is a unique identifier to allow overwriting an old callback
929 926 with a newer callback.
930 927 """
931 928 self._plchangecallbacks[category] = callback
932 929
933 930 def _writedirstate(self, st):
934 931 # notify callbacks about parents change
935 932 if self._origpl is not None and self._origpl != self._pl:
936 933 for c, callback in sorted(
937 934 pycompat.iteritems(self._plchangecallbacks)
938 935 ):
939 936 callback(self, self._origpl, self._pl)
940 937 self._origpl = None
941 938 # use the modification time of the newly created temporary file as the
942 939 # filesystem's notion of 'now'
943 940 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
944 941
945 942 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
946 943 # timestamp of each entries in dirstate, because of 'now > mtime'
947 944 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
948 945 if delaywrite > 0:
949 946 # do we have any files to delay for?
950 947 for f, e in pycompat.iteritems(self._map):
951 948 if e.need_delay(now):
952 949 import time # to avoid useless import
953 950
954 951 # rather than sleep n seconds, sleep until the next
955 952 # multiple of n seconds
956 953 clock = time.time()
957 954 start = int(clock) - (int(clock) % delaywrite)
958 955 end = start + delaywrite
959 956 time.sleep(end - clock)
960 957 now = end # trust our estimate that the end is near now
961 958 break
962 959
963 960 self._map.write(st, now)
964 961 self._lastnormaltime = 0
965 962 self._dirty = False
966 963
967 964 def _dirignore(self, f):
968 965 if self._ignore(f):
969 966 return True
970 967 for p in pathutil.finddirs(f):
971 968 if self._ignore(p):
972 969 return True
973 970 return False
974 971
975 972 def _ignorefiles(self):
976 973 files = []
977 974 if os.path.exists(self._join(b'.hgignore')):
978 975 files.append(self._join(b'.hgignore'))
979 976 for name, path in self._ui.configitems(b"ui"):
980 977 if name == b'ignore' or name.startswith(b'ignore.'):
981 978 # we need to use os.path.join here rather than self._join
982 979 # because path is arbitrary and user-specified
983 980 files.append(os.path.join(self._rootdir, util.expandpath(path)))
984 981 return files
985 982
986 983 def _ignorefileandline(self, f):
987 984 files = collections.deque(self._ignorefiles())
988 985 visited = set()
989 986 while files:
990 987 i = files.popleft()
991 988 patterns = matchmod.readpatternfile(
992 989 i, self._ui.warn, sourceinfo=True
993 990 )
994 991 for pattern, lineno, line in patterns:
995 992 kind, p = matchmod._patsplit(pattern, b'glob')
996 993 if kind == b"subinclude":
997 994 if p not in visited:
998 995 files.append(p)
999 996 continue
1000 997 m = matchmod.match(
1001 998 self._root, b'', [], [pattern], warn=self._ui.warn
1002 999 )
1003 1000 if m(f):
1004 1001 return (i, lineno, line)
1005 1002 visited.add(i)
1006 1003 return (None, -1, b"")
1007 1004
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # translate an unsupported stat mode into a localized message
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        # bind frequently used callables/constants to locals for loop speed
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # Drop explicit files that live inside a subrepo; both lists are
        # sorted, so this is a single merge-style linear pass.
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # sentinels: subrepo paths and '.hg' map to None to stop deeper walks
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1142 1139
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # select the ignore predicates according to what the caller wants
        # listed: listing ignored files disables ignoring entirely
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # bind hot attributes/functions to locals for loop performance
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories (iterative depth-first traversal,
        # using `work` as an explicit stack)
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # unreadable/vanished dirs are reported via match.bad,
                    # anything else is a real error
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # dirstate file with unsupported on-disk type
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # drop the sentinels inserted by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1330 1327
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status using the Rust extension.

        Returns a ``(lookup, status)`` pair shaped like ``status()``'s
        return value.  May raise ``rustmod.FallbackError`` when the Rust
        implementation cannot handle the request; the caller (``status``)
        then falls back to the pure Python path.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        # NOTE: positional unpacking must stay in sync with the tuple
        # returned by rustmod.status
        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # record whether the Rust call left the dirstate needing a write-out
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file, syntax) pair: a pattern with invalid syntax
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: an unreadable pattern file
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1409 1406
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # keep the caller's flags; the bare names are reused as result lists
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # prefer the Rust fast path unless one of the unsupported
        # configurations below applies
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # fall through to the pure Python implementation
                pass

        def noop(f):
            pass

        # local aliases for speed in the hot loop below; the conditional
        # appenders avoid per-file flag checks
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # not in the dirstate at all: either ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                # tracked but gone from disk
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1550 1547
1551 1548 def matches(self, match):
1552 1549 """
1553 1550 return files in the dirstate (in whatever state) filtered by match
1554 1551 """
1555 1552 dmap = self._map
1556 1553 if rustmod is not None:
1557 1554 dmap = self._map._rustmap
1558 1555
1559 1556 if match.always():
1560 1557 return dmap.keys()
1561 1558 files = match.files()
1562 1559 if match.isexact():
1563 1560 # fast path -- filter the other way around, since typically files is
1564 1561 # much smaller than dmap
1565 1562 return [f for f in files if f in dmap]
1566 1563 if match.prefix() and all(fn in dmap for fn in files):
1567 1564 # fast path -- all the values are known to be files, so just return
1568 1565 # that
1569 1566 return list(files)
1570 1567 return [f for f in dmap if match(f)]
1571 1568
1572 1569 def _actualfilename(self, tr):
1573 1570 if tr:
1574 1571 return self._pendingfilename
1575 1572 else:
1576 1573 return self._filename
1577 1574
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` is the active transaction (or None); it selects whether the
        pending or the regular dirstate file is the one backed up.
        ``backupname`` must differ from the live file name.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1615 1612
1616 1613 def restorebackup(self, tr, backupname):
1617 1614 '''Restore dirstate by backup file'''
1618 1615 # this "invalidate()" prevents "wlock.release()" from writing
1619 1616 # changes of dirstate out after restoring from backup file
1620 1617 self.invalidate()
1621 1618 filename = self._actualfilename(tr)
1622 1619 o = self._opener
1623 1620 if util.samefile(o.join(backupname), o.join(filename)):
1624 1621 o.unlink(backupname)
1625 1622 else:
1626 1623 o.rename(backupname, filename, checkambig=True)
1627 1624
    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        # ``tr`` is accepted for interface symmetry with savebackup and
        # restorebackup; removing the backup itself does not need it.
        self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now