# (diff-viewer export header, preserved as a comment)
# dirstate: deprecate `dirstate.remove` in all cases
# author: marmoute — changeset r48501:0e87c90f (default branch)
# hunk: @@ -1,1672 +1,1679 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# dirstate-v2 support currently requires the Rust extensions
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# short local aliases for frequently used helpers
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem
50 50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg directory
        return obj._opener.join(fname)
57 57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the repository root
        return obj._join(fname)
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
def requires_parents_change(func):
    """Decorator: *func* may only run inside a parentchange context.

    Raises error.ProgrammingError if called while no parent change is
    pending on the dirstate.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
85 85
def requires_no_parents_change(func):
    """Decorator: *func* may only run OUTSIDE a parentchange context.

    Raises error.ProgrammingError if called while a parent change is
    pending on the dirstate.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95 95
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of nested parentchange() contexts; > 0 while a parent
        # change is in progress
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        # category -> callback, fired when the working dir parents change
        self._plchangecallbacks = {}
        # parents as of the last write; used to detect a parent move
        self._origpl = None
        # files touched since the last write (used to clear ambiguous times)
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139 139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # reading the property is enough to populate the underlying map
        self._pl
146 146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        # pendingparentchange() reports True while this counter is non-zero
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163 163
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        # incremented/decremented by the parentchange() context manager
        return self._parentwriters > 0
169 169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # assigning to self._map replaces this propertycache entry, so
        # subsequent accesses read the instance attribute directly
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181 181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        # self._sparsematchfn is supplied by the repository at construction
        return self._sparsematchfn()
194 194
195 195 @repocache(b'branch')
196 196 def _branch(self):
197 197 try:
198 198 return self._opener.read(b"branch").strip() or b"default"
199 199 except IOError as inst:
200 200 if inst.errno != errno.ENOENT:
201 201 raise
202 202 return b"default"
203 203
    @property
    def _pl(self):
        # (p1, p2) pair of working-directory parent nodeids
        return self._map.parents()

    def hasdir(self, d):
        # delegate to the map's tracked-directory index
        return self._map.hastrackeddir(d)
210 210
211 211 @rootcache(b'.hgignore')
212 212 def _ignore(self):
213 213 files = self._ignorefiles()
214 214 if not files:
215 215 return matchmod.never()
216 216
217 217 pats = [b'include:%s' % f for f in files]
218 218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219 219
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' (ui.slash) on
        # platforms whose native separator differs
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # does the working directory's filesystem support symlinks?
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # does the working directory's filesystem track the exec bit?
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed against '.hg')
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240 240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a tracked path to its flags.

        The returned function yields b'l' (symlink), b'x' (executable) or
        b'' by probing the filesystem for each flag it can express, and
        consults the function built by `buildfallback()` for flags it
        cannot.
        """
        if self._checklink and self._checkexec:
            # filesystem expresses both flags: a single lstat answers both

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        # at least one flag cannot be expressed: build the fallback source
        fallback = buildfallback()
        if self._checklink:

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither flag is expressible: rely entirely on the fallback
            return fallback
280 280
281 281 @propertycache
282 282 def _cwd(self):
283 283 # internal config: ui.forcecwd
284 284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 285 if forcecwd:
286 286 return forcecwd
287 287 return encoding.getcwd()
288 288
    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # inside the repo: return the path relative to the root
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
308 308
309 309 def pathto(self, f, cwd=None):
310 310 if cwd is None:
311 311 cwd = self.getcwd()
312 312 path = util.pathto(self._root, cwd, f)
313 313 if self._slash:
314 314 return util.pconvert(path)
315 315 return path
316 316
317 317 def __getitem__(self, key):
318 318 """Return the current state of key (a filename) in the dirstate.
319 319
320 320 States are:
321 321 n normal
322 322 m needs merging
323 323 r marked for removal
324 324 a marked for addition
325 325 ? not tracked
326 326
327 327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 328 consider migrating all user of this to going through the dirstate entry
329 329 instead.
330 330 """
331 331 entry = self._map.get(key)
332 332 if entry is not None:
333 333 return entry.state
334 334 return b'?'
335 335
    def __contains__(self, key):
        # membership means "the dirstate has an entry for this file"
        return key in self._map

    def __iter__(self):
        # deterministic (sorted) iteration over tracked filenames
        return iter(sorted(self._map))

    def items(self):
        """Iterate over (filename, entry) pairs."""
        return pycompat.iteritems(self._map)

    # Python 2 compatibility alias
    iteritems = items

    def directories(self):
        # delegate to the map's directory listing
        return self._map.directories()
349 349
    def parents(self):
        # both working-directory parents, run through the validate hook
        return [self._validate(p) for p in self._pl]

    def p1(self):
        # first working-directory parent
        return self._validate(self._pl[0])

    def p2(self):
        # second working-directory parent (nullid outside of a merge)
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        # branch name converted to the local encoding for display
        return encoding.tolocal(self._branch)
366 366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        # moving from a merge (two parents) back to a single parent:
        # merge/otherparent markers no longer make sense and are rewritten
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414 414
    def setbranch(self, branch):
        """Persist `branch` (given in the local encoding) to .hg/branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise
430 430
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so they are recomputed on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
446 446
447 447 def copy(self, source, dest):
448 448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 449 if source == dest:
450 450 return
451 451 self._dirty = True
452 452 if source is not None:
453 453 self._map.copymap[dest] = source
454 454 self._updatedfiles.add(source)
455 455 self._updatedfiles.add(dest)
456 456 elif self._map.copymap.pop(dest, None):
457 457 self._updatedfiles.add(dest)
458 458
    def copied(self, file):
        """Return the recorded copy source of `file`, or None."""
        return self._map.copymap.get(file, None)

    def copies(self):
        # full dest -> source mapping of recorded copies
        return self._map.copymap
464 464
465 465 @requires_no_parents_change
466 466 def set_tracked(self, filename):
467 467 """a "public" method for generic code to mark a file as tracked
468 468
469 469 This function is to be called outside of "update/merge" case. For
470 470 example by a command like `hg add X`.
471 471
472 472 return True the file was previously untracked, False otherwise.
473 473 """
474 474 entry = self._map.get(filename)
475 475 if entry is None:
476 476 self._add(filename)
477 477 return True
478 478 elif not entry.tracked:
479 479 self.normallookup(filename)
480 480 return True
481 481 return False
482 482
483 483 @requires_no_parents_change
484 484 def set_untracked(self, filename):
485 485 """a "public" method for generic code to mark a file as untracked
486 486
487 487 This function is to be called outside of "update/merge" case. For
488 488 example by a command like `hg remove X`.
489 489
490 490 return True the file was previously tracked, False otherwise.
491 491 """
492 492 entry = self._map.get(filename)
493 493 if entry is None:
494 494 return False
495 495 elif entry.added:
496 496 self._drop(filename)
497 497 return True
498 498 else:
499 499 self._remove(filename)
500 500 return True
501 501
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            pass
        else:
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
563 563
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        self._updatedfiles.add(filename)

        # an lstat is only needed for files considered clean in both the
        # working copy and p1
        need_parent_file_data = (
            not (possibly_dirty or clean_p2 or merged)
            and wc_tracked
            and p1_tracked
        )

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        if need_parent_file_data:
            if parentfiledata is None:
                parentfiledata = self._get_filedata(filename)
            mtime = parentfiledata[2]

            if mtime > self._lastnormaltime:
                # Remember the most recent modification timeslot for
                # status(), to make sure we won't miss future
                # size-preserving file content modifications that happen
                # within the same timeslot.
                self._lastnormaltime = mtime

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_tracked=p2_tracked,
            merged=merged,
            clean_p1=clean_p1,
            clean_p2=clean_p2,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
639 639
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """Record `f` in the map after validating its name.

        Name checks (illegal characters, file/directory clashes) only run
        when the file becomes newly tracked: explicitly added, or
        resurrected from a removed entry.
        """
        entry = self._map.get(f)
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
679 679
680 680 def _get_filedata(self, filename):
681 681 """returns"""
682 682 s = os.lstat(self._join(filename))
683 683 mode = s.st_mode
684 684 size = s.st_size
685 685 mtime = s[stat.ST_MTIME]
686 686 return (mode, size, mtime)
687 687
    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            (mode, size, mtime) = self._get_filedata(f)
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        # a clean file cannot be a copy nor non-normal
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
711 711
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    return
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
735 735
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if not self.in_merge:
            msg = _(b"setting %r to other parent only allowed in merges") % f
            raise error.Abort(msg)
        entry = self._map.get(f)
        if entry is not None and entry.tracked:
            # merge-like
            self._addpath(f, merged=True)
        else:
            # add-like
            self._addpath(f, from_p2=True)
        self._map.copymap.pop(f, None)
749 749
    def add(self, f):
        '''Mark a file added.'''
        if not self.pendingparentchange():
            # deprecated entry point (hg 6.0): use set_tracked() instead
            util.nouideprecwarn(
                b"do not use `add` outside of update/merge context."
                b" Use `set_tracked`",
                b'6.0',
                stacklevel=2,
            )
        self._add(f)

    def _add(self, filename):
        """internal function to mark a file as added"""
        self._addpath(filename, added=True)
        # drop any stale copy record
        self._map.copymap.pop(filename, None)
765 765
766 766 def remove(self, f):
767 767 '''Mark a file removed'''
768 if not self.pendingparentchange():
768 if self.pendingparentchange():
769 util.nouideprecwarn(
770 b"do not use `remove` insde of update/merge context."
771 b" Use `update_file` or `update_file_p1`",
772 b'6.0',
773 stacklevel=2,
774 )
775 else:
769 776 util.nouideprecwarn(
770 777 b"do not use `remove` outside of update/merge context."
771 778 b" Use `set_untracked`",
772 779 b'6.0',
773 780 stacklevel=2,
774 781 )
775 782 self._remove(f)
776 783
    def _remove(self, filename):
        """internal function to mark a file removed"""
        self._dirty = True
        self._updatedfiles.add(filename)
        # removal semantics differ during a merge, so pass that along
        self._map.removefile(filename, in_merge=self.in_merge)
782 789
783 790 def merge(self, f):
784 791 '''Mark a file merged.'''
785 792 if not self.in_merge:
786 793 return self.normallookup(f)
787 794 return self.otherparent(f)
788 795
    def drop(self, f):
        '''Drop a file from the dirstate'''
        if not self.pendingparentchange():
            # deprecated entry point (hg 6.0): use set_untracked() instead
            util.nouideprecwarn(
                b"do not use `drop` outside of update/merge context."
                b" Use `set_untracked`",
                b'6.0',
                stacklevel=2,
            )
        self._drop(f)

    def _drop(self, filename):
        """internal function to drop a file from the dirstate"""
        # dropfile reports whether an entry actually existed
        if self._map.dropfile(filename):
            self._dirty = True
            self._updatedfiles.add(filename)
            self._map.copymap.pop(filename, None)
806 813
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Recover the on-disk case of `path` and cache it in `storemap`.

        `normed` is the case-normalized form of `path`. `exists` may be
        passed by the caller to skip the lexists() probe. For missing
        paths only the directory part is normalized (or the path is
        returned unchanged when `ignoremissing` is set).
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded

        return folded
832 839
833 840 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
834 841 normed = util.normcase(path)
835 842 folded = self._map.filefoldmap.get(normed, None)
836 843 if folded is None:
837 844 if isknown:
838 845 folded = path
839 846 else:
840 847 folded = self._discoverpath(
841 848 path, normed, ignoremissing, exists, self._map.filefoldmap
842 849 )
843 850 return folded
844 851
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Case-normalize `path`, consulting file then directory fold maps."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
860 867
    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        # on case-sensitive filesystems, paths are used as-is
        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path
882 889
    def clear(self):
        """Empty the dirstate, marking it dirty so the change gets written."""
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True
888 895
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild entries from `allfiles`, with `parent` as sole parent.

        When `changedfiles` is given, only those files are refreshed:
        ones present in `allfiles` are re-looked up, the rest dropped.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the rebuild
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True
922 929
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
930 937
    def write(self, tr):
        """Write the dirstate to disk (or schedule it on transaction `tr`)."""
        # nothing to do when no in-memory change is pending
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
962 969
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # later registrations under the same category replace earlier ones
        self._plchangecallbacks[category] = callback
973 980
    def _writedirstate(self, tr, st):
        """Serialize the map to open file `st`, firing parent callbacks."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = 0
        self._dirty = False
1007 1014
1008 1015 def _dirignore(self, f):
1009 1016 if self._ignore(f):
1010 1017 return True
1011 1018 for p in pathutil.finddirs(f):
1012 1019 if self._ignore(p):
1013 1020 return True
1014 1021 return False
1015 1022
1016 1023 def _ignorefiles(self):
1017 1024 files = []
1018 1025 if os.path.exists(self._join(b'.hgignore')):
1019 1026 files.append(self._join(b'.hgignore'))
1020 1027 for name, path in self._ui.configitems(b"ui"):
1021 1028 if name == b'ignore' or name.startswith(b'ignore.'):
1022 1029 # we need to use os.path.join here rather than self._join
1023 1030 # because path is arbitrary and user-specified
1024 1031 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1025 1032 return files
1026 1033
1027 1034 def _ignorefileandline(self, f):
1028 1035 files = collections.deque(self._ignorefiles())
1029 1036 visited = set()
1030 1037 while files:
1031 1038 i = files.popleft()
1032 1039 patterns = matchmod.readpatternfile(
1033 1040 i, self._ui.warn, sourceinfo=True
1034 1041 )
1035 1042 for pattern, lineno, line in patterns:
1036 1043 kind, p = matchmod._patsplit(pattern, b'glob')
1037 1044 if kind == b"subinclude":
1038 1045 if p not in visited:
1039 1046 files.append(p)
1040 1047 continue
1041 1048 m = matchmod.match(
1042 1049 self._root, b'', [], [pattern], warn=self._ui.warn
1043 1050 )
1044 1051 if m(f):
1045 1052 return (i, lineno, line)
1046 1053 visited.add(i)
1047 1054 return (None, -1, b"")
1048 1055
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # translate an unsupported stat mode into a human-readable
            # message for match.bad()
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # hoist frequently-used attributes into locals for the loops below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; both lists are
        # sorted so a single merge pass over them suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group the stat'ed files by their case-normalized form
            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            # for any colliding group, keep only the on-disk spelling
            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1183 1190
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        The walk proceeds in three steps: explicit files (via
        _walkexplicit), directory traversal, and a final sweep over
        dirstate entries not yet accounted for.
        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # hoist attribute lookups out of the traversal loops below
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # unreadable/absent directory: report via match.bad and
                    # keep walking; anything else is a real error
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1371 1378
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Run status through the Rust fast path and adapt its output.

        Returns the same ``(lookup, scmutil.status)`` pair that ``status()``
        produces.  May raise ``rustmod.FallbackError`` when the Rust side
        cannot service the request; the caller then falls back to the pure
        Python implementation.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        # the order of this tuple is part of the FFI contract with
        # rustmod.status; do not reorder
        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax) pair: invalid pattern syntax
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: the pattern file could not be read
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1450 1457
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # try the Rust fast path unless a case the Rust implementation
        # does not cover applies
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        # bound methods as locals: these run once per walked file
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1591 1598
1592 1599 def matches(self, match):
1593 1600 """
1594 1601 return files in the dirstate (in whatever state) filtered by match
1595 1602 """
1596 1603 dmap = self._map
1597 1604 if rustmod is not None:
1598 1605 dmap = self._map._rustmap
1599 1606
1600 1607 if match.always():
1601 1608 return dmap.keys()
1602 1609 files = match.files()
1603 1610 if match.isexact():
1604 1611 # fast path -- filter the other way around, since typically files is
1605 1612 # much smaller than dmap
1606 1613 return [f for f in files if f in dmap]
1607 1614 if match.prefix() and all(fn in dmap for fn in files):
1608 1615 # fast path -- all the values are known to be files, so just return
1609 1616 # that
1610 1617 return list(files)
1611 1618 return [f for f in dmap if match(f)]
1612 1619
1613 1620 def _actualfilename(self, tr):
1614 1621 if tr:
1615 1622 return self._pendingfilename
1616 1623 else:
1617 1624 return self._filename
1618 1625
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        The current (possibly dirty) dirstate is first flushed to its
        regular file, then hardlink-copied to ``backupname``.  ``tr`` is
        the active transaction, or None outside of one.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1657 1664
1658 1665 def restorebackup(self, tr, backupname):
1659 1666 '''Restore dirstate by backup file'''
1660 1667 # this "invalidate()" prevents "wlock.release()" from writing
1661 1668 # changes of dirstate out after restoring from backup file
1662 1669 self.invalidate()
1663 1670 filename = self._actualfilename(tr)
1664 1671 o = self._opener
1665 1672 if util.samefile(o.join(backupname), o.join(filename)):
1666 1673 o.unlink(backupname)
1667 1674 else:
1668 1675 o.rename(backupname, filename, checkambig=True)
1669 1676
1670 1677 def clearbackup(self, tr, backupname):
1671 1678 '''Clear backup file'''
1672 1679 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now