##// END OF EJS Templates
dirstate: make a conditionnal easier to read in `setparents`...
marmoute -
r48801:dd267f16 default
parent child Browse files
Show More
@@ -1,1618 +1,1616
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
# C (or pure-Python fallback) implementations of performance-critical helpers
parsers = policy.importmod('parsers')
# Rust implementation of the dirstate, when the extensions are available
rustmod = policy.importrust('dirstate')

# dirstate-v2 support is only provided by the Rust implementation
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# local aliases for frequently-used helpers
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem
50 50
class repocache(filecache):
    """filecache specialization for files living under ``.hg/``"""

    def join(self, obj, fname):
        # resolve fname through the repository's .hg opener
        opener = obj._opener
        return opener.join(fname)
57 57
class rootcache(filecache):
    """filecache specialization for files living in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
def requires_parents_change(func):
    """Decorator enforcing that *func* runs inside a parentchange context.

    Raises error.ProgrammingError when called while no parent change is
    pending on the dirstate.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
85 85
def requires_no_parents_change(func):
    """Decorator enforcing that *func* runs outside a parentchange context.

    Raises error.ProgrammingError when called while a parent change is
    pending on the dirstate.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context'
        raise error.ProgrammingError(msg % func.__name__)

    return wrap
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.

        validate is a callable run over each parent node; sparsematchfn
        returns the matcher for the sparse checkout; nodeconstants carries
        hash constants (e.g. nullid); use_dirstate_v2 selects the on-disk
        format.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when in-memory state diverges from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # nesting counter for parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        # category -> callback, see addparentchangecallback()
        self._plchangecallbacks = {}
        # parents as of the last write, used to detect parent changes
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # merely touching the property is enough to populate the map
        self._pl
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.

        Contexts may nest; pendingparentchange() reports on the counter.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
164 164 def pendingparentchange(self):
165 165 """Returns true if the dirstate is in the middle of a set of changes
166 166 that modify the dirstate parent.
167 167 """
168 168 return self._parentwriters > 0
169 169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # NOTE(review): the assignment caches the map on the instance
        # (propertycache protocol) before returning it
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
195 195 @repocache(b'branch')
196 196 def _branch(self):
197 197 try:
198 198 return self._opener.read(b"branch").strip() or b"default"
199 199 except IOError as inst:
200 200 if inst.errno != errno.ENOENT:
201 201 raise
202 202 return b"default"
203 203
204 204 @property
205 205 def _pl(self):
206 206 return self._map.parents()
207 207
208 208 def hasdir(self, d):
209 209 return self._map.hastrackeddir(d)
210 210
211 211 @rootcache(b'.hgignore')
212 212 def _ignore(self):
213 213 files = self._ignorefiles()
214 214 if not files:
215 215 return matchmod.never()
216 216
217 217 pats = [b'include:%s' % f for f in files]
218 218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219 219
220 220 @propertycache
221 221 def _slash(self):
222 222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223 223
224 224 @propertycache
225 225 def _checklink(self):
226 226 return util.checklink(self._root)
227 227
228 228 @propertycache
229 229 def _checkexec(self):
230 230 return bool(util.checkexec(self._root))
231 231
232 232 @propertycache
233 233 def _checkcase(self):
234 234 return not util.fscasesensitive(self._join(b'.hg'))
235 235
236 236 def _join(self, f):
237 237 # much faster than os.path.join()
238 238 # it's safe because f is always a relative path
239 239 return self._rootdir + f
240 240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a path to its flags: b'l', b'x' or b''.

        *buildfallback* is only invoked when the filesystem cannot answer
        for symlinks, exec bits, or both.
        """
        if self._checklink and self._checkexec:
            # fast path: one lstat answers both questions

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks are real; exec bit comes from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit is real; symlink flag comes from the fallback

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither is supported: delegate entirely
            return fallback
281 281 @propertycache
282 282 def _cwd(self):
283 283 # internal config: ui.forcecwd
284 284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 285 if forcecwd:
286 286 return forcecwd
287 287 return encoding.getcwd()
288 288
289 289 def getcwd(self):
290 290 """Return the path from which a canonical path is calculated.
291 291
292 292 This path should be used to resolve file patterns or to convert
293 293 canonical paths back to file paths for display. It shouldn't be
294 294 used to get real file paths. Use vfs functions instead.
295 295 """
296 296 cwd = self._cwd
297 297 if cwd == self._root:
298 298 return b''
299 299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 300 rootsep = self._root
301 301 if not util.endswithsep(rootsep):
302 302 rootsep += pycompat.ossep
303 303 if cwd.startswith(rootsep):
304 304 return cwd[len(rootsep) :]
305 305 else:
306 306 # we're outside the repo. return an absolute path.
307 307 return cwd
308 308
309 309 def pathto(self, f, cwd=None):
310 310 if cwd is None:
311 311 cwd = self.getcwd()
312 312 path = util.pathto(self._root, cwd, f)
313 313 if self._slash:
314 314 return util.pconvert(path)
315 315 return path
316 316
317 317 def __getitem__(self, key):
318 318 """Return the current state of key (a filename) in the dirstate.
319 319
320 320 States are:
321 321 n normal
322 322 m needs merging
323 323 r marked for removal
324 324 a marked for addition
325 325 ? not tracked
326 326
327 327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 328 consider migrating all user of this to going through the dirstate entry
329 329 instead.
330 330 """
331 331 entry = self._map.get(key)
332 332 if entry is not None:
333 333 return entry.state
334 334 return b'?'
335 335
336 336 def __contains__(self, key):
337 337 return key in self._map
338 338
339 339 def __iter__(self):
340 340 return iter(sorted(self._map))
341 341
342 342 def items(self):
343 343 return pycompat.iteritems(self._map)
344 344
345 345 iteritems = items
346 346
347 347 def directories(self):
348 348 return self._map.directories()
349 349
350 350 def parents(self):
351 351 return [self._validate(p) for p in self._pl]
352 352
353 353 def p1(self):
354 354 return self._validate(self._pl[0])
355 355
356 356 def p2(self):
357 357 return self._validate(self._pl[1])
358 358
359 359 @property
360 360 def in_merge(self):
361 361 """True if a merge is in progress"""
362 362 return self._pl[1] != self._nodeconstants.nullid
363 363
364 364 def branch(self):
365 365 return encoding.tolocal(self._branch)
366 366
367 367 def setparents(self, p1, p2=None):
368 368 """Set dirstate parents to p1 and p2.
369 369
370 370 When moving from two parents to one, "merged" entries a
371 371 adjusted to normal and previous copy records discarded and
372 372 returned by the call.
373 373
374 374 See localrepo.setparents()
375 375 """
376 376 if p2 is None:
377 377 p2 = self._nodeconstants.nullid
378 378 if self._parentwriters == 0:
379 379 raise ValueError(
380 380 b"cannot set dirstate parent outside of "
381 381 b"dirstate.parentchange context manager"
382 382 )
383 383
384 384 self._dirty = True
385 385 oldp2 = self._pl[1]
386 386 if self._origpl is None:
387 387 self._origpl = self._pl
388 388 self._map.setparents(p1, p2)
389 389 copies = {}
390 if (
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
390 nullid = self._nodeconstants.nullid
391 if oldp2 != nullid and p2 == nullid:
394 392 candidatefiles = self._map.non_normal_or_other_parent_paths()
395 393
396 394 for f in candidatefiles:
397 395 s = self._map.get(f)
398 396 if s is None:
399 397 continue
400 398
401 399 # Discard "merged" markers when moving away from a merge state
402 400 if s.merged:
403 401 source = self._map.copymap.get(f)
404 402 if source:
405 403 copies[f] = source
406 404 self._normallookup(f)
407 405 # Also fix up otherparent markers
408 406 elif s.from_p2:
409 407 source = self._map.copymap.get(f)
410 408 if source:
411 409 copies[f] = source
412 410 self._check_new_tracked_filename(f)
413 411 self._updatedfiles.add(f)
414 412 self._map.reset_state(
415 413 f,
416 414 p1_tracked=False,
417 415 wc_tracked=True,
418 416 )
419 417 return copies
420 418
    def setbranch(self, branch):
        """Persist *branch* (given in local encoding) as the current branch.

        The .hg/branch file is written atomically; on failure the write is
        discarded and the error re-raised.
        """
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise
437 435 def invalidate(self):
438 436 """Causes the next access to reread the dirstate.
439 437
440 438 This is different from localrepo.invalidatedirstate() because it always
441 439 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
442 440 check whether the dirstate has changed before rereading it."""
443 441
444 442 for a in ("_map", "_branch", "_ignore"):
445 443 if a in self.__dict__:
446 444 delattr(self, a)
447 445 self._lastnormaltime = 0
448 446 self._dirty = False
449 447 self._updatedfiles.clear()
450 448 self._parentwriters = 0
451 449 self._origpl = None
452 450
453 451 def copy(self, source, dest):
454 452 """Mark dest as a copy of source. Unmark dest if source is None."""
455 453 if source == dest:
456 454 return
457 455 self._dirty = True
458 456 if source is not None:
459 457 self._map.copymap[dest] = source
460 458 self._updatedfiles.add(source)
461 459 self._updatedfiles.add(dest)
462 460 elif self._map.copymap.pop(dest, None):
463 461 self._updatedfiles.add(dest)
464 462
465 463 def copied(self, file):
466 464 return self._map.copymap.get(file, None)
467 465
468 466 def copies(self):
469 467 return self._map.copymap
470 468
471 469 @requires_no_parents_change
472 470 def set_tracked(self, filename):
473 471 """a "public" method for generic code to mark a file as tracked
474 472
475 473 This function is to be called outside of "update/merge" case. For
476 474 example by a command like `hg add X`.
477 475
478 476 return True the file was previously untracked, False otherwise.
479 477 """
480 478 self._dirty = True
481 479 self._updatedfiles.add(filename)
482 480 entry = self._map.get(filename)
483 481 if entry is None:
484 482 self._check_new_tracked_filename(filename)
485 483 self._map.addfile(filename, added=True)
486 484 return True
487 485 elif not entry.tracked:
488 486 self._normallookup(filename)
489 487 return True
490 488 # XXX This is probably overkill for more case, but we need this to
491 489 # fully replace the `normallookup` call with `set_tracked` one.
492 490 # Consider smoothing this in the future.
493 491 self.set_possibly_dirty(filename)
494 492 return False
495 493
496 494 @requires_no_parents_change
497 495 def set_untracked(self, filename):
498 496 """a "public" method for generic code to mark a file as untracked
499 497
500 498 This function is to be called outside of "update/merge" case. For
501 499 example by a command like `hg remove X`.
502 500
503 501 return True the file was previously tracked, False otherwise.
504 502 """
505 503 ret = self._map.set_untracked(filename)
506 504 if ret:
507 505 self._dirty = True
508 506 self._updatedfiles.add(filename)
509 507 return ret
510 508
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean

        *parentfiledata*, when given, is the (mode, size, mtime) triple to
        record; otherwise it is read from disk via lstat.
        """
        self._dirty = True
        self._updatedfiles.add(filename)
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            (mode, size, mtime) = self._get_filedata(filename)
        # a newly-tracked file must pass name validation first
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        self._map.set_clean(filename, mode, size, mtime)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
529 527 @requires_no_parents_change
530 528 def set_possibly_dirty(self, filename):
531 529 """record that the current state of the file on disk is unknown"""
532 530 self._dirty = True
533 531 self._updatedfiles.add(filename)
534 532 self._map.set_possibly_dirty(filename)
535 533
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            pass
        else:
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the direstates parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        self._updatedfiles.add(filename)

        # stat data from disk is only meaningful for a file that is clean
        # in both the working copy and p1
        need_parent_file_data = (
            not (possibly_dirty or clean_p2 or merged)
            and wc_tracked
            and p1_tracked
        )

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        if need_parent_file_data:
            if parentfiledata is None:
                parentfiledata = self._get_filedata(filename)
            mtime = parentfiledata[2]

            if mtime > self._lastnormaltime:
                # Remember the most recent modification timeslot for
                # status(), to make sure we won't miss future
                # size-preserving file content modifications that happen
                # within the same timeslot.
                self._lastnormaltime = mtime

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_tracked=p2_tracked,
            merged=merged,
            clean_p1=clean_p1,
            clean_p2=clean_p2,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
674 672 def _addpath(
675 673 self,
676 674 f,
677 675 mode=0,
678 676 size=None,
679 677 mtime=None,
680 678 added=False,
681 679 merged=False,
682 680 from_p2=False,
683 681 possibly_dirty=False,
684 682 ):
685 683 entry = self._map.get(f)
686 684 if added or entry is not None and not entry.tracked:
687 685 self._check_new_tracked_filename(f)
688 686 self._dirty = True
689 687 self._updatedfiles.add(f)
690 688 self._map.addfile(
691 689 f,
692 690 mode=mode,
693 691 size=size,
694 692 mtime=mtime,
695 693 added=added,
696 694 merged=merged,
697 695 from_p2=from_p2,
698 696 possibly_dirty=possibly_dirty,
699 697 )
700 698
701 699 def _check_new_tracked_filename(self, filename):
702 700 scmutil.checkfilename(filename)
703 701 if self._map.hastrackeddir(filename):
704 702 msg = _(b'directory %r already in dirstate')
705 703 msg %= pycompat.bytestr(filename)
706 704 raise error.Abort(msg)
707 705 # shadows
708 706 for d in pathutil.finddirs(filename):
709 707 if self._map.hastrackeddir(d):
710 708 break
711 709 entry = self._map.get(d)
712 710 if entry is not None and not entry.removed:
713 711 msg = _(b'file %r in dirstate clashes with %r')
714 712 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
715 713 raise error.Abort(msg)
716 714
    def _get_filedata(self, filename):
        """Return the (mode, size, mtime) triple for *filename* via lstat."""
        s = os.lstat(self._join(filename))
        mode = s.st_mode
        size = s.st_size
        mtime = s[stat.ST_MTIME]
        return (mode, size, mtime)
    def _normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with a a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    # re-add as "from p2", re-establishing any copy record
                    source = self._map.copymap.get(f)
                    self._addpath(f, from_p2=True)
                    self._map.copymap.pop(f, None)
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in the desired merge-related state
                    return
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
747 745 def _drop(self, filename):
748 746 """internal function to drop a file from the dirstate"""
749 747 if self._map.dropfile(filename):
750 748 self._dirty = True
751 749 self._updatedfiles.add(filename)
752 750
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Resolve the on-disk case of *path* and cache it in *storemap*.

        *normed* is the case-normalized form of *path*; *exists* may be
        passed to skip the filesystem existence probe.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded

        return folded
779 777 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
780 778 normed = util.normcase(path)
781 779 folded = self._map.filefoldmap.get(normed, None)
782 780 if folded is None:
783 781 if isknown:
784 782 folded = path
785 783 else:
786 784 folded = self._discoverpath(
787 785 path, normed, ignoremissing, exists, self._map.filefoldmap
788 786 )
789 787 return folded
790 788
791 789 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
792 790 normed = util.normcase(path)
793 791 folded = self._map.filefoldmap.get(normed, None)
794 792 if folded is None:
795 793 folded = self._map.dirfoldmap.get(normed, None)
796 794 if folded is None:
797 795 if isknown:
798 796 folded = path
799 797 else:
800 798 # store discovered result in dirfoldmap so that future
801 799 # normalizefile calls don't start matching directories
802 800 folded = self._discoverpath(
803 801 path, normed, ignoremissing, exists, self._map.dirfoldmap
804 802 )
805 803 return folded
806 804
807 805 def normalize(self, path, isknown=False, ignoremissing=False):
808 806 """
809 807 normalize the case of a pathname when on a casefolding filesystem
810 808
811 809 isknown specifies whether the filename came from walking the
812 810 disk, to avoid extra filesystem access.
813 811
814 812 If ignoremissing is True, missing path are returned
815 813 unchanged. Otherwise, we try harder to normalize possibly
816 814 existing path components.
817 815
818 816 The normalized case is determined based on the following precedence:
819 817
820 818 - version of name already stored in the dirstate
821 819 - version of name stored on disk
822 820 - version provided via command arguments
823 821 """
824 822
825 823 if self._checkcase:
826 824 return self._normalize(path, isknown, ignoremissing)
827 825 return path
828 826
829 827 def clear(self):
830 828 self._map.clear()
831 829 self._lastnormaltime = 0
832 830 self._updatedfiles.clear()
833 831 self._dirty = True
834 832
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild dirstate entries against *parent*.

        With changedfiles=None the whole dirstate is rebuilt from
        *allfiles*; otherwise only the listed files are re-looked-up
        (when present in *allfiles*) or dropped.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the rebuild
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self._normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True
869 867 def identity(self):
870 868 """Return identity of dirstate itself to detect changing in storage
871 869
872 870 If identity of previous dirstate is equal to this, writing
873 871 changes based on the former dirstate out can keep consistency.
874 872 """
875 873 return self._map.identity
876 874
    def write(self, tr):
        """Write pending changes to disk, or schedule them on transaction *tr*."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )
            return

        # no transaction: write synchronously and atomically
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
909 907 def addparentchangecallback(self, category, callback):
910 908 """add a callback to be called when the wd parents are changed
911 909
912 910 Callback will be called with the following arguments:
913 911 dirstate, (oldp1, oldp2), (newp1, newp2)
914 912
915 913 Category is a unique identifier to allow overwriting an old callback
916 914 with a newer callback.
917 915 """
918 916 self._plchangecallbacks[category] = callback
919 917
    def _writedirstate(self, tr, st):
        """Serialize the dirstate map to file object *st*.

        Fires parent-change callbacks and, per debug.dirstate.delaywrite,
        may sleep so that entry mtimes cannot be ambiguous with "now".
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = 0
        self._dirty = False
954 952 def _dirignore(self, f):
955 953 if self._ignore(f):
956 954 return True
957 955 for p in pathutil.finddirs(f):
958 956 if self._ignore(p):
959 957 return True
960 958 return False
961 959
962 960 def _ignorefiles(self):
963 961 files = []
964 962 if os.path.exists(self._join(b'.hgignore')):
965 963 files.append(self._join(b'.hgignore'))
966 964 for name, path in self._ui.configitems(b"ui"):
967 965 if name == b'ignore' or name.startswith(b'ignore.'):
968 966 # we need to use os.path.join here rather than self._join
969 967 # because path is arbitrary and user-specified
970 968 files.append(os.path.join(self._rootdir, util.expandpath(path)))
971 969 return files
972 970
973 971 def _ignorefileandline(self, f):
974 972 files = collections.deque(self._ignorefiles())
975 973 visited = set()
976 974 while files:
977 975 i = files.popleft()
978 976 patterns = matchmod.readpatternfile(
979 977 i, self._ui.warn, sourceinfo=True
980 978 )
981 979 for pattern, lineno, line in patterns:
982 980 kind, p = matchmod._patsplit(pattern, b'glob')
983 981 if kind == b"subinclude":
984 982 if p not in visited:
985 983 files.append(p)
986 984 continue
987 985 m = matchmod.match(
988 986 self._root, b'', [], [pattern], warn=self._ui.warn
989 987 )
990 988 if m(f):
991 989 return (i, lineno, line)
992 990 visited.add(i)
993 991 return (None, -1, b"")
994 992
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for an unsupported on-disk file
            # type, used in match.bad() messages
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently used objects to locals for speed in the loops below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        # on case-insensitive filesystems, non-exact patterns must be
        # normalized to the case recorded in the dirstate
        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; both lists are
        # sorted, so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # seed results with sentinels so we never walk into subrepos or .hg
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1129 1127
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # choose ignore predicates according to which categories the caller
        # wants reported (listing ignored files means ignoring nothing)
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # bind hot attributes to locals for speed in the traversal loop
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # unreadable/vanished directories are reported via
                    # match.bad() rather than aborting the whole walk
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # the subrepo and .hg sentinels were only needed during traversal
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1317 1315
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status through the Rust implementation.

        Returns the same ``(lookup, status)`` pair as :meth:`status`.
        May raise ``rustmod.FallbackError``, in which case the caller
        falls back to the pure-Python path.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # propagate any dirtiness the Rust side reported on the map
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax) pair: invalid pattern syntax
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: the pattern file could not be read
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1396 1394
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # try the Rust fast path unless some unsupported feature is in use
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        # bind the append methods (or no-ops for unrequested categories)
        # to locals for speed in the classification loop below
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # seen by the walk but not in the dirstate map: either
                # ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1537 1535
1538 1536 def matches(self, match):
1539 1537 """
1540 1538 return files in the dirstate (in whatever state) filtered by match
1541 1539 """
1542 1540 dmap = self._map
1543 1541 if rustmod is not None:
1544 1542 dmap = self._map._rustmap
1545 1543
1546 1544 if match.always():
1547 1545 return dmap.keys()
1548 1546 files = match.files()
1549 1547 if match.isexact():
1550 1548 # fast path -- filter the other way around, since typically files is
1551 1549 # much smaller than dmap
1552 1550 return [f for f in files if f in dmap]
1553 1551 if match.prefix() and all(fn in dmap for fn in files):
1554 1552 # fast path -- all the values are known to be files, so just return
1555 1553 # that
1556 1554 return list(files)
1557 1555 return [f for f in dmap if match(f)]
1558 1556
1559 1557 def _actualfilename(self, tr):
1560 1558 if tr:
1561 1559 return self._pendingfilename
1562 1560 else:
1563 1561 return self._filename
1564 1562
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        # while a transaction is running, back up the pending file instead
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1603 1601
1604 1602 def restorebackup(self, tr, backupname):
1605 1603 '''Restore dirstate by backup file'''
1606 1604 # this "invalidate()" prevents "wlock.release()" from writing
1607 1605 # changes of dirstate out after restoring from backup file
1608 1606 self.invalidate()
1609 1607 filename = self._actualfilename(tr)
1610 1608 o = self._opener
1611 1609 if util.samefile(o.join(backupname), o.join(filename)):
1612 1610 o.unlink(backupname)
1613 1611 else:
1614 1612 o.rename(backupname, filename, checkambig=True)
1615 1613
    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        # ``tr`` is unused here but kept for interface symmetry with
        # savebackup()/restorebackup()
        self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now