##// END OF EJS Templates
dirstate: use `tracked` property in `_addpath`...
marmoute -
r48796:36c0d738 default
parent child Browse files
Show More
@@ -1,1614 +1,1614 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
import collections
import contextlib
import errno
import functools
import os
import stat
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43 43
44 44 propertycache = util.propertycache
45 45 filecache = scmutil.filecache
46 46 _rangemask = dirstatemap.rangemask
47 47
48 48 DirstateItem = parsers.DirstateItem
49 49
50 50
51 51 class repocache(filecache):
52 52 """filecache for files in .hg/"""
53 53
54 54 def join(self, obj, fname):
55 55 return obj._opener.join(fname)
56 56
57 57
58 58 class rootcache(filecache):
59 59 """filecache for files in the repository root"""
60 60
61 61 def join(self, obj, fname):
62 62 return obj._join(fname)
63 63
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
75 75 def requires_parents_change(func):
76 76 def wrap(self, *args, **kwargs):
77 77 if not self.pendingparentchange():
78 78 msg = 'calling `%s` outside of a parentchange context'
79 79 msg %= func.__name__
80 80 raise error.ProgrammingError(msg)
81 81 return func(self, *args, **kwargs)
82 82
83 83 return wrap
84 84
85 85
86 86 def requires_no_parents_change(func):
87 87 def wrap(self, *args, **kwargs):
88 88 if self.pendingparentchange():
89 89 msg = 'calling `%s` inside of a parentchange context'
90 90 msg %= func.__name__
91 91 raise error.ProgrammingError(msg)
92 92 return func(self, *args, **kwargs)
93 93
94 94 return wrap
95 95
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
99 99 def __init__(
100 100 self,
101 101 opener,
102 102 ui,
103 103 root,
104 104 validate,
105 105 sparsematchfn,
106 106 nodeconstants,
107 107 use_dirstate_v2,
108 108 ):
109 109 """Create a new dirstate object.
110 110
111 111 opener is an open()-like callable that can be used to open the
112 112 dirstate file; root is the root of the directory tracked by
113 113 the dirstate.
114 114 """
115 115 self._use_dirstate_v2 = use_dirstate_v2
116 116 self._nodeconstants = nodeconstants
117 117 self._opener = opener
118 118 self._validate = validate
119 119 self._root = root
120 120 self._sparsematchfn = sparsematchfn
121 121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 122 # UNC path pointing to root share (issue4557)
123 123 self._rootdir = pathutil.normasprefix(root)
124 124 self._dirty = False
125 125 self._lastnormaltime = 0
126 126 self._ui = ui
127 127 self._filecache = {}
128 128 self._parentwriters = 0
129 129 self._filename = b'dirstate'
130 130 self._pendingfilename = b'%s.pending' % self._filename
131 131 self._plchangecallbacks = {}
132 132 self._origpl = None
133 133 self._updatedfiles = set()
134 134 self._mapcls = dirstatemap.dirstatemap
135 135 # Access and cache cwd early, so we don't access it for the first time
136 136 # after a working-copy update caused it to not exist (accessing it then
137 137 # raises an exception).
138 138 self._cwd
139 139
140 140 def prefetch_parents(self):
141 141 """make sure the parents are loaded
142 142
143 143 Used to avoid a race condition.
144 144 """
145 145 self._pl
146 146
147 147 @contextlib.contextmanager
148 148 def parentchange(self):
149 149 """Context manager for handling dirstate parents.
150 150
151 151 If an exception occurs in the scope of the context manager,
152 152 the incoherent dirstate won't be written when wlock is
153 153 released.
154 154 """
155 155 self._parentwriters += 1
156 156 yield
157 157 # Typically we want the "undo" step of a context manager in a
158 158 # finally block so it happens even when an exception
159 159 # occurs. In this case, however, we only want to decrement
160 160 # parentwriters if the code in the with statement exits
161 161 # normally, so we don't have a try/finally here on purpose.
162 162 self._parentwriters -= 1
163 163
164 164 def pendingparentchange(self):
165 165 """Returns true if the dirstate is in the middle of a set of changes
166 166 that modify the dirstate parent.
167 167 """
168 168 return self._parentwriters > 0
169 169
170 170 @propertycache
171 171 def _map(self):
172 172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 173 self._map = self._mapcls(
174 174 self._ui,
175 175 self._opener,
176 176 self._root,
177 177 self._nodeconstants,
178 178 self._use_dirstate_v2,
179 179 )
180 180 return self._map
181 181
182 182 @property
183 183 def _sparsematcher(self):
184 184 """The matcher for the sparse checkout.
185 185
186 186 The working directory may not include every file from a manifest. The
187 187 matcher obtained by this property will match a path if it is to be
188 188 included in the working directory.
189 189 """
190 190 # TODO there is potential to cache this property. For now, the matcher
191 191 # is resolved on every access. (But the called function does use a
192 192 # cache to keep the lookup fast.)
193 193 return self._sparsematchfn()
194 194
195 195 @repocache(b'branch')
196 196 def _branch(self):
197 197 try:
198 198 return self._opener.read(b"branch").strip() or b"default"
199 199 except IOError as inst:
200 200 if inst.errno != errno.ENOENT:
201 201 raise
202 202 return b"default"
203 203
204 204 @property
205 205 def _pl(self):
206 206 return self._map.parents()
207 207
208 208 def hasdir(self, d):
209 209 return self._map.hastrackeddir(d)
210 210
211 211 @rootcache(b'.hgignore')
212 212 def _ignore(self):
213 213 files = self._ignorefiles()
214 214 if not files:
215 215 return matchmod.never()
216 216
217 217 pats = [b'include:%s' % f for f in files]
218 218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219 219
220 220 @propertycache
221 221 def _slash(self):
222 222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223 223
224 224 @propertycache
225 225 def _checklink(self):
226 226 return util.checklink(self._root)
227 227
228 228 @propertycache
229 229 def _checkexec(self):
230 230 return bool(util.checkexec(self._root))
231 231
232 232 @propertycache
233 233 def _checkcase(self):
234 234 return not util.fscasesensitive(self._join(b'.hg'))
235 235
236 236 def _join(self, f):
237 237 # much faster than os.path.join()
238 238 # it's safe because f is always a relative path
239 239 return self._rootdir + f
240 240
241 241 def flagfunc(self, buildfallback):
242 242 if self._checklink and self._checkexec:
243 243
244 244 def f(x):
245 245 try:
246 246 st = os.lstat(self._join(x))
247 247 if util.statislink(st):
248 248 return b'l'
249 249 if util.statisexec(st):
250 250 return b'x'
251 251 except OSError:
252 252 pass
253 253 return b''
254 254
255 255 return f
256 256
257 257 fallback = buildfallback()
258 258 if self._checklink:
259 259
260 260 def f(x):
261 261 if os.path.islink(self._join(x)):
262 262 return b'l'
263 263 if b'x' in fallback(x):
264 264 return b'x'
265 265 return b''
266 266
267 267 return f
268 268 if self._checkexec:
269 269
270 270 def f(x):
271 271 if b'l' in fallback(x):
272 272 return b'l'
273 273 if util.isexec(self._join(x)):
274 274 return b'x'
275 275 return b''
276 276
277 277 return f
278 278 else:
279 279 return fallback
280 280
281 281 @propertycache
282 282 def _cwd(self):
283 283 # internal config: ui.forcecwd
284 284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 285 if forcecwd:
286 286 return forcecwd
287 287 return encoding.getcwd()
288 288
289 289 def getcwd(self):
290 290 """Return the path from which a canonical path is calculated.
291 291
292 292 This path should be used to resolve file patterns or to convert
293 293 canonical paths back to file paths for display. It shouldn't be
294 294 used to get real file paths. Use vfs functions instead.
295 295 """
296 296 cwd = self._cwd
297 297 if cwd == self._root:
298 298 return b''
299 299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 300 rootsep = self._root
301 301 if not util.endswithsep(rootsep):
302 302 rootsep += pycompat.ossep
303 303 if cwd.startswith(rootsep):
304 304 return cwd[len(rootsep) :]
305 305 else:
306 306 # we're outside the repo. return an absolute path.
307 307 return cwd
308 308
309 309 def pathto(self, f, cwd=None):
310 310 if cwd is None:
311 311 cwd = self.getcwd()
312 312 path = util.pathto(self._root, cwd, f)
313 313 if self._slash:
314 314 return util.pconvert(path)
315 315 return path
316 316
317 317 def __getitem__(self, key):
318 318 """Return the current state of key (a filename) in the dirstate.
319 319
320 320 States are:
321 321 n normal
322 322 m needs merging
323 323 r marked for removal
324 324 a marked for addition
325 325 ? not tracked
326 326
327 327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 328 consider migrating all user of this to going through the dirstate entry
329 329 instead.
330 330 """
331 331 entry = self._map.get(key)
332 332 if entry is not None:
333 333 return entry.state
334 334 return b'?'
335 335
336 336 def __contains__(self, key):
337 337 return key in self._map
338 338
339 339 def __iter__(self):
340 340 return iter(sorted(self._map))
341 341
342 342 def items(self):
343 343 return pycompat.iteritems(self._map)
344 344
345 345 iteritems = items
346 346
347 347 def directories(self):
348 348 return self._map.directories()
349 349
350 350 def parents(self):
351 351 return [self._validate(p) for p in self._pl]
352 352
353 353 def p1(self):
354 354 return self._validate(self._pl[0])
355 355
356 356 def p2(self):
357 357 return self._validate(self._pl[1])
358 358
359 359 @property
360 360 def in_merge(self):
361 361 """True if a merge is in progress"""
362 362 return self._pl[1] != self._nodeconstants.nullid
363 363
364 364 def branch(self):
365 365 return encoding.tolocal(self._branch)
366 366
367 367 def setparents(self, p1, p2=None):
368 368 """Set dirstate parents to p1 and p2.
369 369
370 370 When moving from two parents to one, "merged" entries a
371 371 adjusted to normal and previous copy records discarded and
372 372 returned by the call.
373 373
374 374 See localrepo.setparents()
375 375 """
376 376 if p2 is None:
377 377 p2 = self._nodeconstants.nullid
378 378 if self._parentwriters == 0:
379 379 raise ValueError(
380 380 b"cannot set dirstate parent outside of "
381 381 b"dirstate.parentchange context manager"
382 382 )
383 383
384 384 self._dirty = True
385 385 oldp2 = self._pl[1]
386 386 if self._origpl is None:
387 387 self._origpl = self._pl
388 388 self._map.setparents(p1, p2)
389 389 copies = {}
390 390 if (
391 391 oldp2 != self._nodeconstants.nullid
392 392 and p2 == self._nodeconstants.nullid
393 393 ):
394 394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395 395
396 396 for f in candidatefiles:
397 397 s = self._map.get(f)
398 398 if s is None:
399 399 continue
400 400
401 401 # Discard "merged" markers when moving away from a merge state
402 402 if s.merged:
403 403 source = self._map.copymap.get(f)
404 404 if source:
405 405 copies[f] = source
406 406 self._normallookup(f)
407 407 # Also fix up otherparent markers
408 408 elif s.from_p2:
409 409 source = self._map.copymap.get(f)
410 410 if source:
411 411 copies[f] = source
412 412 self._add(f)
413 413 return copies
414 414
415 415 def setbranch(self, branch):
416 416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 418 try:
419 419 f.write(self._branch + b'\n')
420 420 f.close()
421 421
422 422 # make sure filecache has the correct stat info for _branch after
423 423 # replacing the underlying file
424 424 ce = self._filecache[b'_branch']
425 425 if ce:
426 426 ce.refresh()
427 427 except: # re-raises
428 428 f.discard()
429 429 raise
430 430
431 431 def invalidate(self):
432 432 """Causes the next access to reread the dirstate.
433 433
434 434 This is different from localrepo.invalidatedirstate() because it always
435 435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 436 check whether the dirstate has changed before rereading it."""
437 437
438 438 for a in ("_map", "_branch", "_ignore"):
439 439 if a in self.__dict__:
440 440 delattr(self, a)
441 441 self._lastnormaltime = 0
442 442 self._dirty = False
443 443 self._updatedfiles.clear()
444 444 self._parentwriters = 0
445 445 self._origpl = None
446 446
447 447 def copy(self, source, dest):
448 448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 449 if source == dest:
450 450 return
451 451 self._dirty = True
452 452 if source is not None:
453 453 self._map.copymap[dest] = source
454 454 self._updatedfiles.add(source)
455 455 self._updatedfiles.add(dest)
456 456 elif self._map.copymap.pop(dest, None):
457 457 self._updatedfiles.add(dest)
458 458
459 459 def copied(self, file):
460 460 return self._map.copymap.get(file, None)
461 461
462 462 def copies(self):
463 463 return self._map.copymap
464 464
465 465 @requires_no_parents_change
466 466 def set_tracked(self, filename):
467 467 """a "public" method for generic code to mark a file as tracked
468 468
469 469 This function is to be called outside of "update/merge" case. For
470 470 example by a command like `hg add X`.
471 471
472 472 return True the file was previously untracked, False otherwise.
473 473 """
474 474 entry = self._map.get(filename)
475 475 if entry is None:
476 476 self._add(filename)
477 477 return True
478 478 elif not entry.tracked:
479 479 self._normallookup(filename)
480 480 return True
481 481 # XXX This is probably overkill for more case, but we need this to
482 482 # fully replace the `normallookup` call with `set_tracked` one.
483 483 # Consider smoothing this in the future.
484 484 self.set_possibly_dirty(filename)
485 485 return False
486 486
487 487 @requires_no_parents_change
488 488 def set_untracked(self, filename):
489 489 """a "public" method for generic code to mark a file as untracked
490 490
491 491 This function is to be called outside of "update/merge" case. For
492 492 example by a command like `hg remove X`.
493 493
494 494 return True the file was previously tracked, False otherwise.
495 495 """
496 496 ret = self._map.set_untracked(filename)
497 497 if ret:
498 498 self._dirty = True
499 499 self._updatedfiles.add(filename)
500 500 return ret
501 501
502 502 @requires_no_parents_change
503 503 def set_clean(self, filename, parentfiledata=None):
504 504 """record that the current state of the file on disk is known to be clean"""
505 505 self._dirty = True
506 506 self._updatedfiles.add(filename)
507 507 if parentfiledata:
508 508 (mode, size, mtime) = parentfiledata
509 509 else:
510 510 (mode, size, mtime) = self._get_filedata(filename)
511 511 if not self._map[filename].tracked:
512 512 self._check_new_tracked_filename(filename)
513 513 self._map.set_clean(filename, mode, size, mtime)
514 514 if mtime > self._lastnormaltime:
515 515 # Remember the most recent modification timeslot for status(),
516 516 # to make sure we won't miss future size-preserving file content
517 517 # modifications that happen within the same timeslot.
518 518 self._lastnormaltime = mtime
519 519
520 520 @requires_no_parents_change
521 521 def set_possibly_dirty(self, filename):
522 522 """record that the current state of the file on disk is unknown"""
523 523 self._dirty = True
524 524 self._updatedfiles.add(filename)
525 525 self._map.set_possibly_dirty(filename)
526 526
527 527 @requires_parents_change
528 528 def update_file_p1(
529 529 self,
530 530 filename,
531 531 p1_tracked,
532 532 ):
533 533 """Set a file as tracked in the parent (or not)
534 534
535 535 This is to be called when adjust the dirstate to a new parent after an history
536 536 rewriting operation.
537 537
538 538 It should not be called during a merge (p2 != nullid) and only within
539 539 a `with dirstate.parentchange():` context.
540 540 """
541 541 if self.in_merge:
542 542 msg = b'update_file_reference should not be called when merging'
543 543 raise error.ProgrammingError(msg)
544 544 entry = self._map.get(filename)
545 545 if entry is None:
546 546 wc_tracked = False
547 547 else:
548 548 wc_tracked = entry.tracked
549 549 possibly_dirty = False
550 550 if p1_tracked and wc_tracked:
551 551 # the underlying reference might have changed, we will have to
552 552 # check it.
553 553 possibly_dirty = True
554 554 elif not (p1_tracked or wc_tracked):
555 555 # the file is no longer relevant to anyone
556 556 self._drop(filename)
557 557 elif (not p1_tracked) and wc_tracked:
558 558 if entry is not None and entry.added:
559 559 return # avoid dropping copy information (maybe?)
560 560 elif p1_tracked and not wc_tracked:
561 561 pass
562 562 else:
563 563 assert False, 'unreachable'
564 564
565 565 # this mean we are doing call for file we do not really care about the
566 566 # data (eg: added or removed), however this should be a minor overhead
567 567 # compared to the overall update process calling this.
568 568 parentfiledata = None
569 569 if wc_tracked:
570 570 parentfiledata = self._get_filedata(filename)
571 571
572 572 self._updatedfiles.add(filename)
573 573 self._map.reset_state(
574 574 filename,
575 575 wc_tracked,
576 576 p1_tracked,
577 577 possibly_dirty=possibly_dirty,
578 578 parentfiledata=parentfiledata,
579 579 )
580 580 if (
581 581 parentfiledata is not None
582 582 and parentfiledata[2] > self._lastnormaltime
583 583 ):
584 584 # Remember the most recent modification timeslot for status(),
585 585 # to make sure we won't miss future size-preserving file content
586 586 # modifications that happen within the same timeslot.
587 587 self._lastnormaltime = parentfiledata[2]
588 588
589 589 @requires_parents_change
590 590 def update_file(
591 591 self,
592 592 filename,
593 593 wc_tracked,
594 594 p1_tracked,
595 595 p2_tracked=False,
596 596 merged=False,
597 597 clean_p1=False,
598 598 clean_p2=False,
599 599 possibly_dirty=False,
600 600 parentfiledata=None,
601 601 ):
602 602 """update the information about a file in the dirstate
603 603
604 604 This is to be called when the direstates parent changes to keep track
605 605 of what is the file situation in regards to the working copy and its parent.
606 606
607 607 This function must be called within a `dirstate.parentchange` context.
608 608
609 609 note: the API is at an early stage and we might need to adjust it
610 610 depending of what information ends up being relevant and useful to
611 611 other processing.
612 612 """
613 613 if merged and (clean_p1 or clean_p2):
614 614 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
615 615 raise error.ProgrammingError(msg)
616 616
617 617 # note: I do not think we need to double check name clash here since we
618 618 # are in a update/merge case that should already have taken care of
619 619 # this. The test agrees
620 620
621 621 self._dirty = True
622 622 self._updatedfiles.add(filename)
623 623
624 624 need_parent_file_data = (
625 625 not (possibly_dirty or clean_p2 or merged)
626 626 and wc_tracked
627 627 and p1_tracked
628 628 )
629 629
630 630 # this mean we are doing call for file we do not really care about the
631 631 # data (eg: added or removed), however this should be a minor overhead
632 632 # compared to the overall update process calling this.
633 633 if need_parent_file_data:
634 634 if parentfiledata is None:
635 635 parentfiledata = self._get_filedata(filename)
636 636 mtime = parentfiledata[2]
637 637
638 638 if mtime > self._lastnormaltime:
639 639 # Remember the most recent modification timeslot for
640 640 # status(), to make sure we won't miss future
641 641 # size-preserving file content modifications that happen
642 642 # within the same timeslot.
643 643 self._lastnormaltime = mtime
644 644
645 645 self._map.reset_state(
646 646 filename,
647 647 wc_tracked,
648 648 p1_tracked,
649 649 p2_tracked=p2_tracked,
650 650 merged=merged,
651 651 clean_p1=clean_p1,
652 652 clean_p2=clean_p2,
653 653 possibly_dirty=possibly_dirty,
654 654 parentfiledata=parentfiledata,
655 655 )
656 656 if (
657 657 parentfiledata is not None
658 658 and parentfiledata[2] > self._lastnormaltime
659 659 ):
660 660 # Remember the most recent modification timeslot for status(),
661 661 # to make sure we won't miss future size-preserving file content
662 662 # modifications that happen within the same timeslot.
663 663 self._lastnormaltime = parentfiledata[2]
664 664
665 665 def _addpath(
666 666 self,
667 667 f,
668 668 mode=0,
669 669 size=None,
670 670 mtime=None,
671 671 added=False,
672 672 merged=False,
673 673 from_p2=False,
674 674 possibly_dirty=False,
675 675 ):
676 676 entry = self._map.get(f)
677 if added or entry is not None and entry.removed:
677 if added or entry is not None and not entry.tracked:
678 678 self._check_new_tracked_filename(f)
679 679 self._dirty = True
680 680 self._updatedfiles.add(f)
681 681 self._map.addfile(
682 682 f,
683 683 mode=mode,
684 684 size=size,
685 685 mtime=mtime,
686 686 added=added,
687 687 merged=merged,
688 688 from_p2=from_p2,
689 689 possibly_dirty=possibly_dirty,
690 690 )
691 691
692 692 def _check_new_tracked_filename(self, filename):
693 693 scmutil.checkfilename(filename)
694 694 if self._map.hastrackeddir(filename):
695 695 msg = _(b'directory %r already in dirstate')
696 696 msg %= pycompat.bytestr(filename)
697 697 raise error.Abort(msg)
698 698 # shadows
699 699 for d in pathutil.finddirs(filename):
700 700 if self._map.hastrackeddir(d):
701 701 break
702 702 entry = self._map.get(d)
703 703 if entry is not None and not entry.removed:
704 704 msg = _(b'file %r in dirstate clashes with %r')
705 705 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
706 706 raise error.Abort(msg)
707 707
708 708 def _get_filedata(self, filename):
709 709 """returns"""
710 710 s = os.lstat(self._join(filename))
711 711 mode = s.st_mode
712 712 size = s.st_size
713 713 mtime = s[stat.ST_MTIME]
714 714 return (mode, size, mtime)
715 715
716 716 def _normallookup(self, f):
717 717 '''Mark a file normal, but possibly dirty.'''
718 718 if self.in_merge:
719 719 # if there is a merge going on and the file was either
720 720 # "merged" or coming from other parent (-2) before
721 721 # being removed, restore that state.
722 722 entry = self._map.get(f)
723 723 if entry is not None:
724 724 # XXX this should probably be dealt with a a lower level
725 725 # (see `merged_removed` and `from_p2_removed`)
726 726 if entry.merged_removed or entry.from_p2_removed:
727 727 source = self._map.copymap.get(f)
728 728 self._addpath(f, from_p2=True)
729 729 self._map.copymap.pop(f, None)
730 730 if source is not None:
731 731 self.copy(source, f)
732 732 return
733 733 elif entry.merged or entry.from_p2:
734 734 return
735 735 self._addpath(f, possibly_dirty=True)
736 736 self._map.copymap.pop(f, None)
737 737
738 738 def _add(self, filename):
739 739 """internal function to mark a file as added"""
740 740 self._addpath(filename, added=True)
741 741 self._map.copymap.pop(filename, None)
742 742
743 743 def _drop(self, filename):
744 744 """internal function to drop a file from the dirstate"""
745 745 if self._map.dropfile(filename):
746 746 self._dirty = True
747 747 self._updatedfiles.add(filename)
748 748
749 749 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
750 750 if exists is None:
751 751 exists = os.path.lexists(os.path.join(self._root, path))
752 752 if not exists:
753 753 # Maybe a path component exists
754 754 if not ignoremissing and b'/' in path:
755 755 d, f = path.rsplit(b'/', 1)
756 756 d = self._normalize(d, False, ignoremissing, None)
757 757 folded = d + b"/" + f
758 758 else:
759 759 # No path components, preserve original case
760 760 folded = path
761 761 else:
762 762 # recursively normalize leading directory components
763 763 # against dirstate
764 764 if b'/' in normed:
765 765 d, f = normed.rsplit(b'/', 1)
766 766 d = self._normalize(d, False, ignoremissing, True)
767 767 r = self._root + b"/" + d
768 768 folded = d + b"/" + util.fspath(f, r)
769 769 else:
770 770 folded = util.fspath(normed, self._root)
771 771 storemap[normed] = folded
772 772
773 773 return folded
774 774
775 775 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
776 776 normed = util.normcase(path)
777 777 folded = self._map.filefoldmap.get(normed, None)
778 778 if folded is None:
779 779 if isknown:
780 780 folded = path
781 781 else:
782 782 folded = self._discoverpath(
783 783 path, normed, ignoremissing, exists, self._map.filefoldmap
784 784 )
785 785 return folded
786 786
787 787 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
788 788 normed = util.normcase(path)
789 789 folded = self._map.filefoldmap.get(normed, None)
790 790 if folded is None:
791 791 folded = self._map.dirfoldmap.get(normed, None)
792 792 if folded is None:
793 793 if isknown:
794 794 folded = path
795 795 else:
796 796 # store discovered result in dirfoldmap so that future
797 797 # normalizefile calls don't start matching directories
798 798 folded = self._discoverpath(
799 799 path, normed, ignoremissing, exists, self._map.dirfoldmap
800 800 )
801 801 return folded
802 802
803 803 def normalize(self, path, isknown=False, ignoremissing=False):
804 804 """
805 805 normalize the case of a pathname when on a casefolding filesystem
806 806
807 807 isknown specifies whether the filename came from walking the
808 808 disk, to avoid extra filesystem access.
809 809
810 810 If ignoremissing is True, missing path are returned
811 811 unchanged. Otherwise, we try harder to normalize possibly
812 812 existing path components.
813 813
814 814 The normalized case is determined based on the following precedence:
815 815
816 816 - version of name already stored in the dirstate
817 817 - version of name stored on disk
818 818 - version provided via command arguments
819 819 """
820 820
821 821 if self._checkcase:
822 822 return self._normalize(path, isknown, ignoremissing)
823 823 return path
824 824
825 825 def clear(self):
826 826 self._map.clear()
827 827 self._lastnormaltime = 0
828 828 self._updatedfiles.clear()
829 829 self._dirty = True
830 830
831 831 def rebuild(self, parent, allfiles, changedfiles=None):
832 832 if changedfiles is None:
833 833 # Rebuild entire dirstate
834 834 to_lookup = allfiles
835 835 to_drop = []
836 836 lastnormaltime = self._lastnormaltime
837 837 self.clear()
838 838 self._lastnormaltime = lastnormaltime
839 839 elif len(changedfiles) < 10:
840 840 # Avoid turning allfiles into a set, which can be expensive if it's
841 841 # large.
842 842 to_lookup = []
843 843 to_drop = []
844 844 for f in changedfiles:
845 845 if f in allfiles:
846 846 to_lookup.append(f)
847 847 else:
848 848 to_drop.append(f)
849 849 else:
850 850 changedfilesset = set(changedfiles)
851 851 to_lookup = changedfilesset & set(allfiles)
852 852 to_drop = changedfilesset - to_lookup
853 853
854 854 if self._origpl is None:
855 855 self._origpl = self._pl
856 856 self._map.setparents(parent, self._nodeconstants.nullid)
857 857
858 858 for f in to_lookup:
859 859 self._normallookup(f)
860 860 for f in to_drop:
861 861 self._drop(f)
862 862
863 863 self._dirty = True
864 864
865 865 def identity(self):
866 866 """Return identity of dirstate itself to detect changing in storage
867 867
868 868 If identity of previous dirstate is equal to this, writing
869 869 changes based on the former dirstate out can keep consistency.
870 870 """
871 871 return self._map.identity
872 872
873 873 def write(self, tr):
874 874 if not self._dirty:
875 875 return
876 876
877 877 filename = self._filename
878 878 if tr:
879 879 # 'dirstate.write()' is not only for writing in-memory
880 880 # changes out, but also for dropping ambiguous timestamp.
881 881 # delayed writing re-raise "ambiguous timestamp issue".
882 882 # See also the wiki page below for detail:
883 883 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
884 884
885 885 # emulate dropping timestamp in 'parsers.pack_dirstate'
886 886 now = _getfsnow(self._opener)
887 887 self._map.clearambiguoustimes(self._updatedfiles, now)
888 888
889 889 # emulate that all 'dirstate.normal' results are written out
890 890 self._lastnormaltime = 0
891 891 self._updatedfiles.clear()
892 892
893 893 # delay writing in-memory changes out
894 894 tr.addfilegenerator(
895 895 b'dirstate',
896 896 (self._filename,),
897 897 lambda f: self._writedirstate(tr, f),
898 898 location=b'plain',
899 899 )
900 900 return
901 901
902 902 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
903 903 self._writedirstate(tr, st)
904 904
905 905 def addparentchangecallback(self, category, callback):
906 906 """add a callback to be called when the wd parents are changed
907 907
908 908 Callback will be called with the following arguments:
909 909 dirstate, (oldp1, oldp2), (newp1, newp2)
910 910
911 911 Category is a unique identifier to allow overwriting an old callback
912 912 with a newer callback.
913 913 """
914 914 self._plchangecallbacks[category] = callback
915 915
916 916 def _writedirstate(self, tr, st):
917 917 # notify callbacks about parents change
918 918 if self._origpl is not None and self._origpl != self._pl:
919 919 for c, callback in sorted(
920 920 pycompat.iteritems(self._plchangecallbacks)
921 921 ):
922 922 callback(self, self._origpl, self._pl)
923 923 self._origpl = None
924 924 # use the modification time of the newly created temporary file as the
925 925 # filesystem's notion of 'now'
926 926 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
927 927
928 928 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
929 929 # timestamp of each entries in dirstate, because of 'now > mtime'
930 930 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
931 931 if delaywrite > 0:
932 932 # do we have any files to delay for?
933 933 for f, e in pycompat.iteritems(self._map):
934 934 if e.need_delay(now):
935 935 import time # to avoid useless import
936 936
937 937 # rather than sleep n seconds, sleep until the next
938 938 # multiple of n seconds
939 939 clock = time.time()
940 940 start = int(clock) - (int(clock) % delaywrite)
941 941 end = start + delaywrite
942 942 time.sleep(end - clock)
943 943 now = end # trust our estimate that the end is near now
944 944 break
945 945
946 946 self._map.write(tr, st, now)
947 947 self._lastnormaltime = 0
948 948 self._dirty = False
949 949
950 950 def _dirignore(self, f):
951 951 if self._ignore(f):
952 952 return True
953 953 for p in pathutil.finddirs(f):
954 954 if self._ignore(p):
955 955 return True
956 956 return False
957 957
958 958 def _ignorefiles(self):
959 959 files = []
960 960 if os.path.exists(self._join(b'.hgignore')):
961 961 files.append(self._join(b'.hgignore'))
962 962 for name, path in self._ui.configitems(b"ui"):
963 963 if name == b'ignore' or name.startswith(b'ignore.'):
964 964 # we need to use os.path.join here rather than self._join
965 965 # because path is arbitrary and user-specified
966 966 files.append(os.path.join(self._rootdir, util.expandpath(path)))
967 967 return files
968 968
969 969 def _ignorefileandline(self, f):
970 970 files = collections.deque(self._ignorefiles())
971 971 visited = set()
972 972 while files:
973 973 i = files.popleft()
974 974 patterns = matchmod.readpatternfile(
975 975 i, self._ui.warn, sourceinfo=True
976 976 )
977 977 for pattern, lineno, line in patterns:
978 978 kind, p = matchmod._patsplit(pattern, b'glob')
979 979 if kind == b"subinclude":
980 980 if p not in visited:
981 981 files.append(p)
982 982 continue
983 983 m = matchmod.match(
984 984 self._root, b'', [], [pattern], warn=self._ui.warn
985 985 )
986 986 if m(f):
987 987 return (i, lineno, line)
988 988 visited.add(i)
989 989 return (None, -1, b"")
990 990
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for unsupported on-disk file types
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently used objects to locals; these are hit once per
        # explicitly matched file in the loop below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        # case normalization is only needed on case-insensitive filesystems
        # and only when the match set is not exact
        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo: they are handled
        # by the subrepo itself (files and subrepos are both sorted so a
        # single merge-style pass suffices)
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # sentinels: subrepo paths and '.hg' map to None so later walks
        # never descend into them
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1125 1125
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # choose the ignore predicates based on which categories the caller
        # wants listed: listing ignored files means nothing is pruned as
        # "ignored" for the walk
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # bind hot attributes to locals; these are used once per directory
        # entry in the traversal below
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # depth-first traversal; ``work`` is a stack of directories
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # remove the sentinels installed by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1313 1313
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status via the Rust dirstate implementation.

        Returns the same ``(lookup, status)`` pair as ``status()``.  May
        raise ``rustmod.FallbackError`` when the Rust path cannot handle
        the request, in which case the caller falls back to pure Python.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        # NOTE: the unpacking order below must match the tuple produced by
        # rustmod.status exactly
        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust side may have mutated the map in place
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax) pair: an invalid pattern was found
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: the pattern file itself was unreadable
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1392 1392
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # the caller's flags are renamed because the local names are reused
        # below as the result-category lists
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # decide whether the Rust fast path can serve this request
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            # appender used for categories the caller did not ask for
            pass

        # bind hot attributes/methods to locals for the per-file loop below
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file on disk but not in the dirstate: unknown or ignored
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                # tracked but gone from disk
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1533 1533
1534 1534 def matches(self, match):
1535 1535 """
1536 1536 return files in the dirstate (in whatever state) filtered by match
1537 1537 """
1538 1538 dmap = self._map
1539 1539 if rustmod is not None:
1540 1540 dmap = self._map._rustmap
1541 1541
1542 1542 if match.always():
1543 1543 return dmap.keys()
1544 1544 files = match.files()
1545 1545 if match.isexact():
1546 1546 # fast path -- filter the other way around, since typically files is
1547 1547 # much smaller than dmap
1548 1548 return [f for f in files if f in dmap]
1549 1549 if match.prefix() and all(fn in dmap for fn in files):
1550 1550 # fast path -- all the values are known to be files, so just return
1551 1551 # that
1552 1552 return list(files)
1553 1553 return [f for f in dmap if match(f)]
1554 1554
1555 1555 def _actualfilename(self, tr):
1556 1556 if tr:
1557 1557 return self._pendingfilename
1558 1558 else:
1559 1559 return self._filename
1560 1560
    def savebackup(self, tr, backupname):
        """Save current dirstate into backup file ``backupname``.

        Pending in-memory changes are flushed to disk first so the backup
        reflects the current state; the backup itself is a hardlink of the
        freshly written file when the platform allows it.
        """
        filename = self._actualfilename(tr)
        # a backup must never alias the live file it is protecting
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1599 1599
1600 1600 def restorebackup(self, tr, backupname):
1601 1601 '''Restore dirstate by backup file'''
1602 1602 # this "invalidate()" prevents "wlock.release()" from writing
1603 1603 # changes of dirstate out after restoring from backup file
1604 1604 self.invalidate()
1605 1605 filename = self._actualfilename(tr)
1606 1606 o = self._opener
1607 1607 if util.samefile(o.join(backupname), o.join(filename)):
1608 1608 o.unlink(backupname)
1609 1609 else:
1610 1610 o.rename(backupname, filename, checkambig=True)
1611 1611
1612 1612 def clearbackup(self, tr, backupname):
1613 1613 '''Clear backup file'''
1614 1614 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now