##// END OF EJS Templates
dirstate: use `reset_state` in `update_file_p1`...
marmoute -
r48494:1c06ef8f default
parent child Browse files
Show More
@@ -1,1640 +1,1656
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
# Platform-specific parser implementations: C/pure-Python via importmod,
# optional Rust acceleration via importrust (None when unavailable).
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# The dirstate-v2 on-disk format is only implemented by the Rust extensions.
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# Local aliases for frequently used helpers.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem
49 49
50 50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg/ opener
        return obj._opener.join(fname)
56 56
57 57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
63 63
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
def requires_parents_change(func):
    """Decorator enforcing that *func* runs inside a parentchange context.

    Raises error.ProgrammingError when the dirstate is not currently in a
    pending-parent-change state (see dirstate.parentchange()).
    """
    import functools

    # functools.wraps preserves func's __name__/__doc__ on the wrapper,
    # keeping tracebacks and introspection useful (the original wrapper
    # hid them behind the generic name 'wrap').
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
84 84
85 85
def requires_no_parents_change(func):
    """Decorator enforcing that *func* runs outside a parentchange context.

    Raises error.ProgrammingError when the dirstate is currently in a
    pending-parent-change state (see dirstate.parentchange()).
    """
    import functools

    # functools.wraps preserves func's __name__/__doc__ on the wrapper,
    # keeping tracebacks and introspection useful (the original wrapper
    # hid them behind the generic name 'wrap').
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
95 95
96 96
@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # nesting depth of parentchange() contexts (see parentchange())
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139 139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching self._pl forces the dirstate map (and parents) to load now
        self._pl
146 146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163 163
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        # non-zero while inside one or more parentchange() contexts
        return self._parentwriters > 0
169 169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # assigning to self._map here replaces the propertycache descriptor
        # with the concrete map on first access
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181 181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194 194
    @repocache(b'branch')
    def _branch(self):
        """Current branch name (bytes), read from .hg/branch.

        Falls back to b"default" when the file is missing or empty.
        """
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            # only a missing file is expected; re-raise anything else
            if inst.errno != errno.ENOENT:
                raise
            return b"default"
203 203
    @property
    def _pl(self):
        # (p1, p2) working-directory parent nodes, straight from the map
        return self._map.parents()
207 207
    def hasdir(self, d):
        """True if *d* is a directory containing tracked files."""
        return self._map.hastrackeddir(d)
210 210
    @rootcache(b'.hgignore')
    def _ignore(self):
        """Matcher for ignored files, built from all configured ignore files."""
        files = self._ignorefiles()
        if not files:
            # no ignore files configured: nothing is ever ignored
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219 219
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' even though the OS
        # separator differs (ui.slash configuration)
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # whether the filesystem at the repo root supports symlinks
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # whether the filesystem at the repo root honors the executable bit
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed against '.hg')
        return not util.fscasesensitive(self._join(b'.hg'))
235 235
    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240 240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a path to its flags.

        The returned function yields b'l' (symlink), b'x' (executable) or
        b'' (plain file).  *buildfallback* supplies flag data for whatever
        the filesystem itself cannot express (it is only invoked when
        needed).
        """
        if self._checklink and self._checkexec:
            # filesystem supports both: a single lstat answers everything
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    # missing/unreadable file: treat as flagless
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks are real on disk; exec bit comes from the fallback
            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit is real on disk; symlink flag comes from the fallback
            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # filesystem can express neither flag: rely on fallback entirely
            return fallback
280 280
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
288 288
    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # inside the repo: return the path relative to the root
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
308 308
    def pathto(self, f, cwd=None):
        """Return repo-relative path *f* expressed relative to *cwd*.

        Uses '/' separators when the ui.slash configuration asks for it.
        """
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path
316 316
    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. we should
        consider migrating all user of this to going through the dirstate entry
        instead.
        """
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        # untracked files have no entry
        return b'?'
335 335
    def __contains__(self, key):
        """True if *key* (a filename) has a dirstate entry."""
        return key in self._map

    def __iter__(self):
        """Iterate over tracked filenames in sorted order."""
        return iter(sorted(self._map))

    def items(self):
        """Iterate over (filename, DirstateItem) pairs."""
        return pycompat.iteritems(self._map)

    # Python 2 spelling kept as an alias
    iteritems = items

    def directories(self):
        return self._map.directories()

    def parents(self):
        # both working-directory parents, run through the validator
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        # branch name converted to the local encoding
        return encoding.tolocal(self._branch)
366 366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for the write-time callbacks
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # leaving a merge: clean up merge/otherparent markers
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414 414
    def setbranch(self, branch):
        """Persist *branch* (local encoding) to .hg/branch atomically."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # discard the partially written temp file before re-raising
            f.discard()
            raise
430 430
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so they get recomputed on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
446 446
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            # self-copy is a no-op
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            # only mark dest touched if there actually was a record to drop
            self._updatedfiles.add(dest)
458 458
    def copied(self, file):
        """Return the copy source of *file*, or None if not a copy."""
        return self._map.copymap.get(file, None)

    def copies(self):
        """Return the full {dest: source} copy map."""
        return self._map.copymap
464 464
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True the file was previously untracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            # brand new file: add it
            self._add(filename)
            return True
        elif not entry.tracked:
            # previously removed/untracked: re-track, content to be rechecked
            self.normallookup(filename)
            return True
        return False
482 482
    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True the file was previously tracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            return False
        elif entry.added:
            # never committed: simply forget about it
            self._drop(filename)
            return True
        else:
            # known to a parent: mark as removed
            self._remove(filename)
            return True
501 501
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            # nothing to adjust before the reset below
            pass
        else:
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
555
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        self._updatedfiles.add(filename)

        # stat data is only useful for a clean, p1-tracked working-copy file
        need_parent_file_data = (
            not (possibly_dirty or clean_p2 or merged)
            and wc_tracked
            and p1_tracked
        )

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        if need_parent_file_data:
            if parentfiledata is None:
                parentfiledata = self._get_filedata(filename)
            mtime = parentfiledata[2]

            if mtime > self._lastnormaltime:
                # Remember the most recent modification timeslot for
                # status(), to make sure we won't miss future
                # size-preserving file content modifications that happen
                # within the same timeslot.
                self._lastnormaltime = mtime

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_tracked=p2_tracked,
            merged=merged,
            clean_p1=clean_p1,
            clean_p2=clean_p2,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
607 623
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """Internal helper: record *f* in the map after sanity checks.

        Validates the filename and rejects file/directory name clashes
        before delegating to the map's addfile().
        """
        entry = self._map.get(f)
        # note precedence: `added or (entry is not None and entry.removed)`
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                # f would shadow an existing tracked directory
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    # an ancestor path of f is a tracked file
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
647 663
    def _get_filedata(self, filename):
        """Return (mode, size, mtime) for *filename* from an lstat call."""
        s = os.lstat(self._join(filename))
        mode = s.st_mode
        size = s.st_size
        mtime = s[stat.ST_MTIME]
        return (mode, size, mtime)
655 671
    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            (mode, size, mtime) = self._get_filedata(f)
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        # a clean file carries no copy record and is not "non-normal"
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
679 695
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in the desired merge state: nothing to do
                    return
        # outside a merge (or no special state): mark for later status check
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
703 719
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if not self.in_merge:
            msg = _(b"setting %r to other parent only allowed in merges") % f
            raise error.Abort(msg)
        entry = self._map.get(f)
        if entry is not None and entry.tracked:
            # merge-like
            self._addpath(f, merged=True)
        else:
            # add-like
            self._addpath(f, from_p2=True)
        self._map.copymap.pop(f, None)
717 733
    def add(self, f):
        '''Mark a file added.'''
        # deprecated outside update/merge context; use set_tracked() instead
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `add` outside of update/merge context."
                b" Use `set_tracked`",
                b'6.0',
                stacklevel=2,
            )
        self._add(f)
728 744
    def _add(self, filename):
        """internal function to mark a file as added"""
        self._addpath(filename, added=True)
        # an added file cannot carry a stale copy record
        self._map.copymap.pop(filename, None)
733 749
    def remove(self, f):
        '''Mark a file removed'''
        # deprecated outside update/merge context; use set_untracked() instead
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `remove` outside of update/merge context."
                b" Use `set_untracked`",
                b'6.0',
                stacklevel=2,
            )
        self._remove(f)
744 760
    def _remove(self, filename):
        """internal function to mark a file removed"""
        self._dirty = True
        self._updatedfiles.add(filename)
        self._map.removefile(filename, in_merge=self.in_merge)
750 766
    def merge(self, f):
        '''Mark a file merged.'''
        if not self.in_merge:
            # outside a merge "merged" degrades to possibly-dirty normal
            return self.normallookup(f)
        return self.otherparent(f)
756 772
    def drop(self, f):
        '''Drop a file from the dirstate'''
        # deprecated outside update/merge context; use set_untracked() instead
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `drop` outside of update/merge context."
                b" Use `set_untracked`",
                b'6.0',
                stacklevel=2,
            )
        self._drop(f)
767 783
    def _drop(self, filename):
        """internal function to drop a file from the dirstate"""
        # dropfile returns True only if an entry was actually removed
        if self._map.dropfile(filename):
            self._dirty = True
            self._updatedfiles.add(filename)
            self._map.copymap.pop(filename, None)
774 790
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Resolve the on-disk case of *path* and cache it in *storemap*.

        *normed* is the case-normalized form of *path*; *exists* may be
        passed by the caller to avoid an extra lexists() check.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded

        return folded
800 816
    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        """Normalize a file path's case, consulting the file fold map cache."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                # caller got the name from disk: it already has correct case
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded
812 828
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Normalize a path's case, checking both file and dir fold maps."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                # caller got the name from disk: it already has correct case
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
828 844
    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        # case-sensitive filesystem: nothing to normalize
        return path
850 866
    def clear(self):
        """Empty the dirstate map and mark the dirstate dirty."""
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True
856 872
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild the dirstate for *parent*.

        With no *changedfiles*, the whole dirstate is reset from *allfiles*;
        otherwise only the changed files are re-looked-up or dropped.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # preserve the "now" watermark across the clear()
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        # rebuild always targets a single-parent state
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True
890 906
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
898 914
    def write(self, tr):
        """Write the dirstate to disk (or schedule it on transaction *tr*)."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )
            return

        # no transaction: write immediately and atomically
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
930 946
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
941 957
    def _writedirstate(self, tr, st):
        """Serialize the map into open file *st*, firing parent callbacks."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = 0
        self._dirty = False
975 991
976 992 def _dirignore(self, f):
977 993 if self._ignore(f):
978 994 return True
979 995 for p in pathutil.finddirs(f):
980 996 if self._ignore(p):
981 997 return True
982 998 return False
983 999
984 1000 def _ignorefiles(self):
985 1001 files = []
986 1002 if os.path.exists(self._join(b'.hgignore')):
987 1003 files.append(self._join(b'.hgignore'))
988 1004 for name, path in self._ui.configitems(b"ui"):
989 1005 if name == b'ignore' or name.startswith(b'ignore.'):
990 1006 # we need to use os.path.join here rather than self._join
991 1007 # because path is arbitrary and user-specified
992 1008 files.append(os.path.join(self._rootdir, util.expandpath(path)))
993 1009 return files
994 1010
    def _ignorefileandline(self, f):
        # Find which ignore pattern matches ``f``; return a
        # (patternfile, lineno, originalline) triple, or (None, -1, b"")
        # when no pattern matches.  Ignore files are processed
        # breadth-first so that "subinclude" references are expanded.
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced file instead of matching it;
                    # `visited` guards against include cycles
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    # first match wins
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
1016 1032
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # translate an unexpected st_mode into a human-readable
            # message for match.bad()
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # hot-path local bindings to avoid repeated attribute lookups
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; both lists are
        # sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # seed results with sentinels for subrepos and .hg so the walk
        # never descends into them
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group result paths by their case-normalized form
            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            # case mismatch with the filesystem: keep the
                            # key but drop the stat data
                            results[path] = None

        return results, dirsfound, dirsnotfound
1151 1167
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # choose ignore predicates based on what the caller wants listed
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # hot-path local bindings to avoid repeated attribute lookups
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # iterative depth-first traversal; `work` is the stack of
            # directories still to list
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # remove the sentinels seeded by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1339 1355
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status through the Rust implementation.

        Returns the same ``(lookup, status)`` pair as ``status()``.  May
        raise ``rustmod.FallbackError``, in which case the caller falls
        back to the pure-Python path.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust side may have mutated the dirstate map
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                # tuple items are (file, bad syntax); plain items are
                # unreadable pattern files
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1418 1434
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # try the Rust fast path unless a known-unsupported feature is in use
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # Rust could not handle this request; fall through to the
                # pure-Python implementation below
                pass

        def noop(f):
            pass

        # hot-path local bindings: these run once per walked file
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file on disk but not in the dirstate: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1559 1575
1560 1576 def matches(self, match):
1561 1577 """
1562 1578 return files in the dirstate (in whatever state) filtered by match
1563 1579 """
1564 1580 dmap = self._map
1565 1581 if rustmod is not None:
1566 1582 dmap = self._map._rustmap
1567 1583
1568 1584 if match.always():
1569 1585 return dmap.keys()
1570 1586 files = match.files()
1571 1587 if match.isexact():
1572 1588 # fast path -- filter the other way around, since typically files is
1573 1589 # much smaller than dmap
1574 1590 return [f for f in files if f in dmap]
1575 1591 if match.prefix() and all(fn in dmap for fn in files):
1576 1592 # fast path -- all the values are known to be files, so just return
1577 1593 # that
1578 1594 return list(files)
1579 1595 return [f for f in dmap if match(f)]
1580 1596
1581 1597 def _actualfilename(self, tr):
1582 1598 if tr:
1583 1599 return self._pendingfilename
1584 1600 else:
1585 1601 return self._filename
1586 1602
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # remove any stale backup before creating the new hardlink
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1625 1641
    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            # backup is a hardlink of the current file (see savebackup);
            # dropping the extra link is all that is needed
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)
1637 1653
1638 1654 def clearbackup(self, tr, backupname):
1639 1655 '''Clear backup file'''
1640 1656 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now