##// END OF EJS Templates
dirstate: factor out the retrieval of "filedata" from `normal`...
marmoute -
r48490:a685c29e default
parent child Browse files
Show More
@@ -1,1626 +1,1631 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43 43
44 44 propertycache = util.propertycache
45 45 filecache = scmutil.filecache
46 46 _rangemask = dirstatemap.rangemask
47 47
48 48 DirstateItem = parsers.DirstateItem
49 49
50 50
class repocache(filecache):
    """filecache subclass resolving file names relative to ``.hg/``"""

    def join(self, obj, fname):
        # delegate to the owning object's .hg-level opener
        return obj._opener.join(fname)
56 56
57 57
class rootcache(filecache):
    """filecache subclass resolving file names relative to the repo root"""

    def join(self, obj, fname):
        # delegate to the owning object's root-relative join helper
        return obj._join(fname)
63 63
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
def requires_parents_change(func):
    """Decorator enforcing that *func* runs inside a parentchange context.

    Calling the wrapped method while no parent change is pending raises
    error.ProgrammingError.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
84 84
85 85
def requires_no_parents_change(func):
    """Decorator enforcing that *func* runs outside a parentchange context.

    Calling the wrapped method while a parent change is pending raises
    error.ProgrammingError.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95 95
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True while in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of currently-open parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139 139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # merely reading _pl forces the dirstate map (and thus the
        # parents) to be loaded
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0
169 169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # assigning to self._map replaces this propertycache on the instance,
        # so subsequent accesses hit the constructed map directly
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194 194
    @repocache(b'branch')
    def _branch(self):
        """Current branch name, read from .hg/branch (b"default" if absent)."""
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            # a missing branch file simply means the default branch
            if inst.errno != errno.ENOENT:
                raise
            return b"default"

    @property
    def _pl(self):
        # (p1, p2) working-copy parents as recorded in the dirstate map
        return self._map.parents()

    def hasdir(self, d):
        # delegate to the dirstate map's tracked-directory index
        return self._map.hastrackeddir(d)
210 210
    @rootcache(b'.hgignore')
    def _ignore(self):
        """Matcher combining all ignore pattern files (see _ignorefiles)."""
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        # True when paths should be shown with '/' although os.sep differs
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # does the filesystem under the repo root support symlinks?
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # does the filesystem under the repo root support the exec bit?
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        # True on case-insensitive (case-folding) filesystems
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240 240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a tracked path to b'l'/b'x'/b'' flags.

        Flags the filesystem cannot express (symlinks, exec bit) are taken
        from the fallback produced by *buildfallback*.
        """
        if self._checklink and self._checkexec:
            # fully capable filesystem: read both flags from disk

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks from disk, exec bit from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit from disk, symlinks from the fallback

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither flag is supported: everything comes from the fallback
            return fallback
280 280
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()

    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # cwd is inside the repo: return the part below the root
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        """Return f relative to cwd (defaults to getcwd()), run through
        util.pconvert when self._slash is set."""
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path
316 316
317 317 def __getitem__(self, key):
318 318 """Return the current state of key (a filename) in the dirstate.
319 319
320 320 States are:
321 321 n normal
322 322 m needs merging
323 323 r marked for removal
324 324 a marked for addition
325 325 ? not tracked
326 326
327 327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 328 consider migrating all user of this to going through the dirstate entry
329 329 instead.
330 330 """
331 331 entry = self._map.get(key)
332 332 if entry is not None:
333 333 return entry.state
334 334 return b'?'
335 335
336 336 def __contains__(self, key):
337 337 return key in self._map
338 338
339 339 def __iter__(self):
340 340 return iter(sorted(self._map))
341 341
342 342 def items(self):
343 343 return pycompat.iteritems(self._map)
344 344
345 345 iteritems = items
346 346
347 347 def directories(self):
348 348 return self._map.directories()
349 349
    def parents(self):
        # both parents, run through the validate callable provided at
        # construction time
        return [self._validate(p) for p in self._pl]

    def p1(self):
        # first working-copy parent
        return self._validate(self._pl[0])

    def p2(self):
        # second working-copy parent (nullid unless a merge is in progress)
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        # branch name converted to the local encoding
        return encoding.tolocal(self._branch)
366 366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for the parent-change callbacks
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # leaving a merge state: entries that referenced p2 must be
            # rewritten
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414 414
    def setbranch(self, branch):
        """Record *branch* (local encoding) as the current branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise
430 430
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so the next access recomputes them
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
446 446
447 447 def copy(self, source, dest):
448 448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 449 if source == dest:
450 450 return
451 451 self._dirty = True
452 452 if source is not None:
453 453 self._map.copymap[dest] = source
454 454 self._updatedfiles.add(source)
455 455 self._updatedfiles.add(dest)
456 456 elif self._map.copymap.pop(dest, None):
457 457 self._updatedfiles.add(dest)
458 458
459 459 def copied(self, file):
460 460 return self._map.copymap.get(file, None)
461 461
462 462 def copies(self):
463 463 return self._map.copymap
464 464
465 465 @requires_no_parents_change
466 466 def set_tracked(self, filename):
467 467 """a "public" method for generic code to mark a file as tracked
468 468
469 469 This function is to be called outside of "update/merge" case. For
470 470 example by a command like `hg add X`.
471 471
472 472 return True the file was previously untracked, False otherwise.
473 473 """
474 474 entry = self._map.get(filename)
475 475 if entry is None:
476 476 self._add(filename)
477 477 return True
478 478 elif not entry.tracked:
479 479 self.normallookup(filename)
480 480 return True
481 481 return False
482 482
483 483 @requires_no_parents_change
484 484 def set_untracked(self, filename):
485 485 """a "public" method for generic code to mark a file as untracked
486 486
487 487 This function is to be called outside of "update/merge" case. For
488 488 example by a command like `hg remove X`.
489 489
490 490 return True the file was previously tracked, False otherwise.
491 491 """
492 492 entry = self._map.get(filename)
493 493 if entry is None:
494 494 return False
495 495 elif entry.added:
496 496 self._drop(filename)
497 497 return True
498 498 else:
499 499 self._remove(filename)
500 500 return True
501 501
    @requires_parents_change
    def update_file_reference(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after an history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            self.normallookup(filename)
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            # tracked only in the working copy (entry cannot be None here
            # since wc_tracked is True)
            if not entry.added:
                self._add(filename)
        elif p1_tracked and not wc_tracked:
            if entry is None or not entry.removed:
                self._remove(filename)
        else:
            assert False, 'unreachable'
539 539
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes to keep track
        of what is the file situation in regards to the working copy and its
        parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)
        if not (p1_tracked or p2_tracked or wc_tracked):
            # nobody references this file anymore
            self._drop(filename)
        elif merged:
            assert wc_tracked
            assert self.in_merge  # we are never in the "normallookup" case
            self.otherparent(filename)
        elif not (p1_tracked or p2_tracked) and wc_tracked:
            # only present in the working copy: a plain addition
            self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
            self._map.copymap.pop(filename, None)
        elif (p1_tracked or p2_tracked) and not wc_tracked:
            self._remove(filename)
        elif clean_p2 and wc_tracked:
            assert p2_tracked
            self.otherparent(filename)
        elif not p1_tracked and p2_tracked and wc_tracked:
            self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
            self._map.copymap.pop(filename, None)
        elif possibly_dirty:
            self._addpath(filename, possibly_dirty=possibly_dirty)
        elif wc_tracked:
            # mark the file tracked, normal and clean
            self.normal(filename)
        # XXX We need something for file that are dirty after an update
        else:
            assert False, 'unreachable'
590 590
    @requires_parents_change
    def update_parent_file_data(self, f, filedata):
        """update the information about the content of a file

        filedata is a (mode, size, mtime) tuple (see _get_filedata).

        This function should be called within a `dirstate.parentchange` context.
        """
        self.normal(f, parentfiledata=filedata)
598 598
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """Low-level helper recording *f* in the dirstate map.

        Runs the filename sanity checks when the file (re)enters the tracked
        set and stores an entry carrying the given state flags.
        """
        entry = self._map.get(f)
        # note: `added or (entry is not None and entry.removed)` — only
        # (re)validate names that become tracked by this call
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
638 638
    def _get_filedata(self, filename):
        """Return a (mode, size, mtime) tuple from an lstat of *filename*.

        *filename* is relative to the repository root (joined via _join).
        """
        s = os.lstat(self._join(filename))
        mode = s.st_mode
        size = s.st_size
        mtime = s[stat.ST_MTIME]
        return (mode, size, mtime)
646
    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # no caller-provided data: read it from disk now
            (mode, size, mtime) = self._get_filedata(f)
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
665 670
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    # restore any copy record discarded by the removal
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    return
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
689 694
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.

        Only valid while a merge is in progress (see in_merge).
        '''
        if not self.in_merge:
            msg = _(b"setting %r to other parent only allowed in merges") % f
            raise error.Abort(msg)
        entry = self._map.get(f)
        if entry is not None and entry.tracked:
            # merge-like
            self._addpath(f, merged=True)
        else:
            # add-like
            self._addpath(f, from_p2=True)
        self._map.copymap.pop(f, None)
703 708
    def add(self, f):
        '''Mark a file added.'''
        # deprecated entry point; generic code should use set_tracked
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `add` outside of update/merge context."
                b" Use `set_tracked`",
                b'6.0',
                stacklevel=2,
            )
        self._add(f)

    def _add(self, filename):
        """internal function to mark a file as added"""
        self._addpath(filename, added=True)
        self._map.copymap.pop(filename, None)

    def remove(self, f):
        '''Mark a file removed'''
        # deprecated entry point; generic code should use set_untracked
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `remove` outside of update/merge context."
                b" Use `set_untracked`",
                b'6.0',
                stacklevel=2,
            )
        self._remove(f)

    def _remove(self, filename):
        """internal function to mark a file removed"""
        self._dirty = True
        self._updatedfiles.add(filename)
        self._map.removefile(filename, in_merge=self.in_merge)

    def merge(self, f):
        '''Mark a file merged.'''
        # outside of an actual merge this degrades to a plain normallookup
        if not self.in_merge:
            return self.normallookup(f)
        return self.otherparent(f)

    def drop(self, f):
        '''Drop a file from the dirstate'''
        # deprecated entry point; generic code should use set_untracked
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `drop` outside of update/merge context."
                b" Use `set_untracked`",
                b'6.0',
                stacklevel=2,
            )
        self._drop(f)

    def _drop(self, filename):
        """internal function to drop a file from the dirstate"""
        if self._map.dropfile(filename):
            self._dirty = True
            self._updatedfiles.add(filename)
            self._map.copymap.pop(filename, None)
760 765
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Resolve the case-folded spelling of *path*.

        The result is memoized into *storemap* keyed by the normalized form
        *normed*.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded

        return folded
786 791
    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        """Case-normalize *path* using the dirstate's file fold map."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Case-normalize *path* using both the file and dir fold maps."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded

    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path
836 841
    def clear(self):
        """Empty the dirstate map and mark the dirstate dirty."""
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to track *allfiles* under revision *parent*.

        When *changedfiles* is given, only those files are looked up or
        dropped instead of rebuilding everything.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the reset
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True
876 881
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity

    def write(self, tr):
        """Write pending dirstate changes out.

        With a transaction *tr*, the write is registered as a delayed file
        generator; without one the dirstate file is written immediately.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
916 921
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

    def _writedirstate(self, tr, st):
        """Serialize the dirstate map to the open file object *st*."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = 0
        self._dirty = False
961 966
962 967 def _dirignore(self, f):
963 968 if self._ignore(f):
964 969 return True
965 970 for p in pathutil.finddirs(f):
966 971 if self._ignore(p):
967 972 return True
968 973 return False
969 974
970 975 def _ignorefiles(self):
971 976 files = []
972 977 if os.path.exists(self._join(b'.hgignore')):
973 978 files.append(self._join(b'.hgignore'))
974 979 for name, path in self._ui.configitems(b"ui"):
975 980 if name == b'ignore' or name.startswith(b'ignore.'):
976 981 # we need to use os.path.join here rather than self._join
977 982 # because path is arbitrary and user-specified
978 983 files.append(os.path.join(self._rootdir, util.expandpath(path)))
979 984 return files
980 985
981 986 def _ignorefileandline(self, f):
982 987 files = collections.deque(self._ignorefiles())
983 988 visited = set()
984 989 while files:
985 990 i = files.popleft()
986 991 patterns = matchmod.readpatternfile(
987 992 i, self._ui.warn, sourceinfo=True
988 993 )
989 994 for pattern, lineno, line in patterns:
990 995 kind, p = matchmod._patsplit(pattern, b'glob')
991 996 if kind == b"subinclude":
992 997 if p not in visited:
993 998 files.append(p)
994 999 continue
995 1000 m = matchmod.match(
996 1001 self._root, b'', [], [pattern], warn=self._ui.warn
997 1002 )
998 1003 if m(f):
999 1004 return (i, lineno, line)
1000 1005 visited.add(i)
1001 1006 return (None, -1, b"")
1002 1007
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # build a human-readable reason for match.bad() from the st_mode
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # hoist frequently-used attributes into locals for the loops below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop any matched file that lives inside one of the subrepos; both
        # lists are sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # seed results with sentinels for subrepos and .hg so the walk
        # never descends into them
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1137 1142
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # pick ignore predicates according to what the caller wants listed:
        # listing ignored files means nothing is ignored during the walk
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # hoist attribute lookups out of the hot traversal loops
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # unreadable or vanished directory: report and move on
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # drop the sentinels seeded by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1325 1330
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status through the Rust implementation.

        Returns the same ``(lookup, status)`` pair as the pure-Python path
        in ``status()``.  May raise ``rustmod.FallbackError`` (caught by
        the caller) when the Rust side cannot handle the request.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust walk may have mutated the map (e.g. directory caching)
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        # relay ignore-file warnings collected on the Rust side:
        # tuples are (file, bad syntax), plain items are unreadable files
        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1404 1409
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # keep the caller's flags; the bare names are reused as result lists
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # prefer the Rust fast path unless a known-unsupported feature is in
        # play; any of the checks below disables it
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # fall through to the pure-Python implementation
                pass

        def noop(f):
            pass

        # bind bound methods to locals: these run once per walked file
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file on disk but not tracked: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1545 1550
1546 1551 def matches(self, match):
1547 1552 """
1548 1553 return files in the dirstate (in whatever state) filtered by match
1549 1554 """
1550 1555 dmap = self._map
1551 1556 if rustmod is not None:
1552 1557 dmap = self._map._rustmap
1553 1558
1554 1559 if match.always():
1555 1560 return dmap.keys()
1556 1561 files = match.files()
1557 1562 if match.isexact():
1558 1563 # fast path -- filter the other way around, since typically files is
1559 1564 # much smaller than dmap
1560 1565 return [f for f in files if f in dmap]
1561 1566 if match.prefix() and all(fn in dmap for fn in files):
1562 1567 # fast path -- all the values are known to be files, so just return
1563 1568 # that
1564 1569 return list(files)
1565 1570 return [f for f in dmap if match(f)]
1566 1571
1567 1572 def _actualfilename(self, tr):
1568 1573 if tr:
1569 1574 return self._pendingfilename
1570 1575 else:
1571 1576 return self._filename
1572 1577
1573 1578 def savebackup(self, tr, backupname):
1574 1579 '''Save current dirstate into backup file'''
1575 1580 filename = self._actualfilename(tr)
1576 1581 assert backupname != filename
1577 1582
1578 1583 # use '_writedirstate' instead of 'write' to write changes certainly,
1579 1584 # because the latter omits writing out if transaction is running.
1580 1585 # output file will be used to create backup of dirstate at this point.
1581 1586 if self._dirty or not self._opener.exists(filename):
1582 1587 self._writedirstate(
1583 1588 tr,
1584 1589 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1585 1590 )
1586 1591
1587 1592 if tr:
1588 1593 # ensure that subsequent tr.writepending returns True for
1589 1594 # changes written out above, even if dirstate is never
1590 1595 # changed after this
1591 1596 tr.addfilegenerator(
1592 1597 b'dirstate',
1593 1598 (self._filename,),
1594 1599 lambda f: self._writedirstate(tr, f),
1595 1600 location=b'plain',
1596 1601 )
1597 1602
1598 1603 # ensure that pending file written above is unlinked at
1599 1604 # failure, even if tr.writepending isn't invoked until the
1600 1605 # end of this transaction
1601 1606 tr.registertmp(filename, location=b'plain')
1602 1607
1603 1608 self._opener.tryunlink(backupname)
1604 1609 # hardlink backup is okay because _writedirstate is always called
1605 1610 # with an "atomictemp=True" file.
1606 1611 util.copyfile(
1607 1612 self._opener.join(filename),
1608 1613 self._opener.join(backupname),
1609 1614 hardlink=True,
1610 1615 )
1611 1616
1612 1617 def restorebackup(self, tr, backupname):
1613 1618 '''Restore dirstate by backup file'''
1614 1619 # this "invalidate()" prevents "wlock.release()" from writing
1615 1620 # changes of dirstate out after restoring from backup file
1616 1621 self.invalidate()
1617 1622 filename = self._actualfilename(tr)
1618 1623 o = self._opener
1619 1624 if util.samefile(o.join(backupname), o.join(filename)):
1620 1625 o.unlink(backupname)
1621 1626 else:
1622 1627 o.rename(backupname, filename, checkambig=True)
1623 1628
1624 1629 def clearbackup(self, tr, backupname):
1625 1630 '''Clear backup file'''
1626 1631 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now