##// END OF EJS Templates
dirstate: replace a dead conditional branch with an assert in `update_file`...
marmoute -
r48473:37825a85 default
parent child Browse files
Show More
@@ -1,1626 +1,1625 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# dirstate-v2 is only available when the Rust extensions are present.
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# Local aliases for frequently used helpers.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

# C-implemented dirstate entry type (see parsers module).
DirstateItem = parsers.DirstateItem
50 50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg/ opener
        return obj._opener.join(fname)
56 56
57 57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
63 63
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
def requires_parents_change(func):
    """Decorator enforcing that *func* runs inside a parentchange context.

    Calling the wrapped method without an active ``parentchange()`` context
    is a programming error and raises ``error.ProgrammingError``.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
84 84
85 85
def requires_no_parents_change(func):
    """Decorator enforcing that *func* runs outside a parentchange context.

    Calling the wrapped method while a ``parentchange()`` context is active
    is a programming error and raises ``error.ProgrammingError``.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95 95
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of currently-open parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        # files touched since the last write; used to clear ambiguous mtimes
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139 139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        self._pl
146 146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163 163
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0
169 169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # Assigning to self._map replaces the propertycache descriptor's
        # placeholder so later accesses hit the instance attribute directly.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181 181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194 194
    @repocache(b'branch')
    def _branch(self):
        """Current branch name read from .hg/branch (b"default" if unset)."""
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            # a missing file simply means the default branch; anything else
            # (permissions, I/O error) is propagated
            if inst.errno != errno.ENOENT:
                raise
            return b"default"
203 203
    @property
    def _pl(self):
        """The (p1, p2) working-directory parent nodes from the dirstate map."""
        return self._map.parents()
207 207
    def hasdir(self, d):
        """True if `d` is a directory containing at least one tracked file."""
        return self._map.hastrackeddir(d)
210 210
    @rootcache(b'.hgignore')
    def _ignore(self):
        """Matcher built from all configured ignore files (.hgignore etc.)."""
        files = self._ignorefiles()
        if not files:
            # no ignore files at all: match nothing
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219 219
    @propertycache
    def _slash(self):
        # whether paths should be displayed with '/' even though the OS
        # separator differs (ui.slash option)
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223 223
    @propertycache
    def _checklink(self):
        # whether the filesystem at the repo root supports symlinks
        return util.checklink(self._root)
227 227
    @propertycache
    def _checkexec(self):
        # whether the filesystem at the repo root honors the exec bit
        return bool(util.checkexec(self._root))
231 231
    @propertycache
    def _checkcase(self):
        # True when the filesystem is case-insensitive (probed via .hg)
        return not util.fscasesensitive(self._join(b'.hg'))
235 235
    def _join(self, f):
        """Return the absolute path of tracked file `f`."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240 240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a tracked path to its flags.

        The returned function yields b'l' for symlinks, b'x' for executables
        and b'' otherwise.  When the filesystem cannot express links and/or
        exec bits, `buildfallback()` supplies the missing information
        (typically from manifest data).
        """
        if self._checklink and self._checkexec:
            # the filesystem can answer both questions directly

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks are real, exec bit comes from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit is real, symlink flag comes from the fallback

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither is supported: rely entirely on the fallback
            return fallback
280 280
    @propertycache
    def _cwd(self):
        """Current working directory, honoring the ui.forcecwd override."""
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
288 288
    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # inside the repo: return the path relative to the root
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
308 308
    def pathto(self, f, cwd=None):
        """Return repo-relative path `f` as a path relative to `cwd`,
        suitable for display (slash-normalized if ui.slash is set)."""
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path
316 316
    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. we should
        consider migrating all user of this to going through the dirstate entry
        instead.
        """
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        return b'?'
335 335
    def __contains__(self, key):
        """True if filename `key` has an entry in the dirstate."""
        return key in self._map
338 338
    def __iter__(self):
        """Iterate over tracked filenames in sorted order."""
        return iter(sorted(self._map))
341 341
    def items(self):
        """Iterate over (filename, DirstateItem) pairs."""
        return pycompat.iteritems(self._map)

    # py2-compat alias
    iteritems = items
346 346
    def directories(self):
        """Return the directory entries tracked by the dirstate map."""
        return self._map.directories()
349 349
    def parents(self):
        """Return the validated (p1, p2) working-directory parents."""
        return [self._validate(p) for p in self._pl]
352 352
    def p1(self):
        """Return the validated first working-directory parent."""
        return self._validate(self._pl[0])
355 355
    def p2(self):
        """Return the validated second working-directory parent."""
        return self._validate(self._pl[1])
358 358
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid
363 363
    def branch(self):
        """Return the current branch name in the local encoding."""
        return encoding.tolocal(self._branch)
366 366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for the change callbacks
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # leaving a merge: clean up merge/other-parent markers
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414 414
    def setbranch(self, branch):
        """Set the current branch, persisting it to .hg/branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise
430 430
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop cached properties so they are recomputed on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
446 446
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            # only dirty the file set if there actually was a record to drop
            self._updatedfiles.add(dest)
458 458
    def copied(self, file):
        """Return the copy source of `file`, or None if it is not a copy."""
        return self._map.copymap.get(file, None)
461 461
    def copies(self):
        """Return the mapping of copy destinations to their sources."""
        return self._map.copymap
464 464
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True if the file was previously untracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            # unknown file: mark as added
            self._add(filename)
            return True
        elif not entry.tracked:
            # previously removed: restore tracking, re-check its content later
            self.normallookup(filename)
            return True
        return False
482 482
    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True if the file was previously tracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            return False
        elif entry.added:
            # never committed: simply forget the entry
            self._drop(filename)
            return True
        else:
            # known in a parent: record the removal
            self._remove(filename)
            return True
501 501
    @requires_parents_change
    def update_file_reference(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent after
        an history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            self.normallookup(filename)
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if not entry.added:
                # tracked in wc but not in the new parent: mark as added
                self._add(filename)
        elif p1_tracked and not wc_tracked:
            if entry is None or not entry.removed:
                # tracked in the new parent but not in wc: mark as removed
                self._remove(filename)
        else:
            assert False, 'unreachable'
539 539
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)
        if not (p1_tracked or p2_tracked or wc_tracked):
            # not relevant to either parent nor the working copy: drop it
            self._drop(filename)
        elif merged:
            assert wc_tracked
            assert self.in_merge  # we are never in the "normallookup" case
            self.otherparent(filename)
        elif not (p1_tracked or p2_tracked) and wc_tracked:
            # freshly added in the working copy
            self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
            self._map.copymap.pop(filename, None)
        elif (p1_tracked or p2_tracked) and not wc_tracked:
            # known in a parent but gone from the working copy
            self._remove(filename)
        elif clean_p2 and wc_tracked:
            assert p2_tracked
            self.otherparent(filename)
        elif not p1_tracked and p2_tracked and wc_tracked:
            # only known from the second parent
            self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
            self._map.copymap.pop(filename, None)
        elif possibly_dirty:
            self._addpath(filename, possibly_dirty=possibly_dirty)
        elif wc_tracked:
            self.normal(filename)
        # XXX We need something for file that are dirty after an update
        else:
            assert False, 'unreachable'
591 590
    @requires_parents_change
    def update_parent_file_data(self, f, filedata):
        """update the information about the content of a file

        `filedata` is the (mode, size, mtime) triple for the clean file.

        This function should be called within a `dirstate.parentchange` context.
        """
        self.normal(f, parentfiledata=filedata)
599 598
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """Low-level helper recording file `f` in the dirstate map.

        Validates the filename against existing tracked directories/files
        before delegating to the map (the flags mirror dirstatemap.addfile).
        """
        entry = self._map.get(f)
        if added or entry is not None and entry.removed:
            # only validate names that are (re)entering the tracked set
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
639 638
    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # no cached data: stat the file ourselves
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
666 665
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in a merge-related state: nothing to do
                    return
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
690 689
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if not self.in_merge:
            msg = _(b"setting %r to other parent only allowed in merges") % f
            raise error.Abort(msg)
        entry = self._map.get(f)
        if entry is not None and entry.tracked:
            # merge-like
            self._addpath(f, merged=True)
        else:
            # add-like
            self._addpath(f, from_p2=True)
        self._map.copymap.pop(f, None)
704 703
    def add(self, f):
        '''Mark a file added.'''
        # deprecated outside of update/merge context; see set_tracked()
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `add` outside of update/merge context."
                b" Use `set_tracked`",
                b'6.0',
                stacklevel=2,
            )
        self._add(f)
715 714
    def _add(self, filename):
        """internal function to mark a file as added"""
        self._addpath(filename, added=True)
        # a newly added file cannot be a copy destination
        self._map.copymap.pop(filename, None)
720 719
    def remove(self, f):
        '''Mark a file removed'''
        # deprecated outside of update/merge context; see set_untracked()
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `remove` outside of update/merge context."
                b" Use `set_untracked`",
                b'6.0',
                stacklevel=2,
            )
        self._remove(f)
731 730
    def _remove(self, filename):
        """internal function to mark a file removed"""
        self._dirty = True
        self._updatedfiles.add(filename)
        self._map.removefile(filename, in_merge=self.in_merge)
737 736
    def merge(self, f):
        '''Mark a file merged.'''
        if not self.in_merge:
            # outside of a merge this degenerates to a possibly-dirty normal
            return self.normallookup(f)
        return self.otherparent(f)
743 742
    def drop(self, f):
        '''Drop a file from the dirstate'''
        # deprecated outside of update/merge context; see set_untracked()
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `drop` outside of update/merge context."
                b" Use `set_untracked`",
                b'6.0',
                stacklevel=2,
            )
        self._drop(f)
754 753
    def _drop(self, filename):
        """internal function to drop a file from the dirstate"""
        if self._map.dropfile(filename):
            # only dirty the state when an entry was actually removed
            self._dirty = True
            self._updatedfiles.add(filename)
            self._map.copymap.pop(filename, None)
761 760
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Resolve the on-disk case of `path` and cache it in `storemap`.

        `normed` is the case-normalized form of `path`; `exists` may be a
        pre-computed lexists() result (or None to compute it here).
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded
787 786
    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        """Normalize the case of `path` using the file fold map only."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                # came from a disk walk: the given case is authoritative
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded
799 798
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Normalize the case of `path`, consulting file then dir fold maps."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                # came from a disk walk: the given case is authoritative
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
815 814
    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        # case-sensitive filesystem: the path is already canonical
        return path
837 836
    def clear(self):
        """Empty the dirstate, marking it dirty so the wipe is persisted."""
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True
843 842
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild the dirstate against `parent` for `allfiles`.

        If `changedfiles` is given, only those files are re-examined: the
        ones present in `allfiles` are marked for lookup, the rest dropped.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # preserve the "now" watermark across the clear()
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True
877 876
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
885 884
    def write(self, tr):
        """Write the dirstate to disk, via transaction `tr` when given."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        # no transaction: write synchronously and atomically
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
917 916
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
928 927
    def _writedirstate(self, st):
        """Serialize the dirstate map into open file object `st`."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
962 961
    def _dirignore(self, f):
        """True if `f` or any of its parent directories is ignored."""
        if self._ignore(f):
            return True
        for p in pathutil.finddirs(f):
            if self._ignore(p):
                return True
        return False
970 969
    def _ignorefiles(self):
        """Return the list of ignore-file paths (.hgignore plus ui.ignore*)."""
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files
981 980
    def _ignorefileandline(self, f):
        """Return ``(file, lineno, originalline)`` for the first ignore rule
        that matches ``f``.

        Performs a breadth-first walk over all ignore files, following
        ``subinclude:`` references transitively (each file visited at most
        once).  Returns ``(None, -1, b"")`` when no rule matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced pattern file, but only once
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
1003 1002
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for a file of unsupported type
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        # bind frequently used callables/constants to locals for the loop below
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; both lists are
        # sorted so a single linear merge pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1138 1137
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # bind frequently used names to locals -- the traversal loop is hot
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # iterative depth-first traversal; ``work`` is the pending stack
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # drop the sentinels added by _walkexplicit before returning
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1326 1325
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status via the Rust extension.

        Returns the same ``(lookup, status)`` pair as :meth:`status`; the
        caller catches ``rustmod.FallbackError`` and falls back to the
        pure-Python implementation when the Rust side cannot handle the
        request.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust walk may have mutated the map (e.g. directory caches)
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax): a pattern with invalid syntax
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare item: path of an unreadable pattern file
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1405 1404
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # keep the caller's flags; the bare names are reused as result lists
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # Rust could not handle this request; use the Python path
                pass

        def noop(f):
            pass

        # bind list methods to locals -- the walk loop below is hot
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file is on disk but not tracked: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1546 1545
1547 1546 def matches(self, match):
1548 1547 """
1549 1548 return files in the dirstate (in whatever state) filtered by match
1550 1549 """
1551 1550 dmap = self._map
1552 1551 if rustmod is not None:
1553 1552 dmap = self._map._rustmap
1554 1553
1555 1554 if match.always():
1556 1555 return dmap.keys()
1557 1556 files = match.files()
1558 1557 if match.isexact():
1559 1558 # fast path -- filter the other way around, since typically files is
1560 1559 # much smaller than dmap
1561 1560 return [f for f in files if f in dmap]
1562 1561 if match.prefix() and all(fn in dmap for fn in files):
1563 1562 # fast path -- all the values are known to be files, so just return
1564 1563 # that
1565 1564 return list(files)
1566 1565 return [f for f in dmap if match(f)]
1567 1566
1568 1567 def _actualfilename(self, tr):
1569 1568 if tr:
1570 1569 return self._pendingfilename
1571 1570 else:
1572 1571 return self._filename
1573 1572
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        Flushes pending changes to disk first (so the backup is current),
        registers the pending file with the transaction ``tr`` when one is
        running, then hardlinks the dirstate file to ``backupname``.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1611 1610
1612 1611 def restorebackup(self, tr, backupname):
1613 1612 '''Restore dirstate by backup file'''
1614 1613 # this "invalidate()" prevents "wlock.release()" from writing
1615 1614 # changes of dirstate out after restoring from backup file
1616 1615 self.invalidate()
1617 1616 filename = self._actualfilename(tr)
1618 1617 o = self._opener
1619 1618 if util.samefile(o.join(backupname), o.join(filename)):
1620 1619 o.unlink(backupname)
1621 1620 else:
1622 1621 o.rename(backupname, filename, checkambig=True)
1623 1622
1624 1623 def clearbackup(self, tr, backupname):
1625 1624 '''Clear backup file'''
1626 1625 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now