##// END OF EJS Templates
dirstate: deprecated `remove` outside of `update/merge`...
marmoute -
r48462:1a79bb8c default
parent child Browse files
Show More
@@ -1,1616 +1,1623 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43 43
44 44 propertycache = util.propertycache
45 45 filecache = scmutil.filecache
46 46 _rangemask = dirstatemap.rangemask
47 47
48 48 DirstateItem = parsers.DirstateItem
49 49
50 50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg/ opener
        return obj._opener.join(fname)
56 56
57 57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
63 63
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
def requires_parents_change(func):
    """Decorator: forbid calling *func* outside a parentchange context.

    Raises error.ProgrammingError when the wrapped method is invoked
    while no `parentchange()` context is active on the dirstate.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return wrap
84 84
85 85
def requires_no_parents_change(func):
    """Decorator: forbid calling *func* inside a parentchange context.

    Raises error.ProgrammingError when the wrapped method is invoked
    while a `parentchange()` context is active on the dirstate.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95 95
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of currently-open parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        # parents as of the last write; used to fire parent-change callbacks
        self._origpl = None
        # files touched since the last write; used to drop ambiguous mtimes
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139 139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching the property is enough to populate the cache
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0
169 169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # assigning to self._map fills the propertycache slot, so the map
        # is built at most once
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194 194
195 195 @repocache(b'branch')
196 196 def _branch(self):
197 197 try:
198 198 return self._opener.read(b"branch").strip() or b"default"
199 199 except IOError as inst:
200 200 if inst.errno != errno.ENOENT:
201 201 raise
202 202 return b"default"
203 203
    @property
    def _pl(self):
        # (p1, p2) nodeids of the working copy parents
        return self._map.parents()

    def hasdir(self, d):
        # True if `d` is a directory containing tracked files
        return self._map.hastrackeddir(d)
210 210
211 211 @rootcache(b'.hgignore')
212 212 def _ignore(self):
213 213 files = self._ignorefiles()
214 214 if not files:
215 215 return matchmod.never()
216 216
217 217 pats = [b'include:%s' % f for f in files]
218 218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219 219
    @propertycache
    def _slash(self):
        # should paths be displayed with '/' instead of the native separator?
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # does the filesystem support symlinks?
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # does the filesystem track the executable bit?
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240 240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a filename to its b'l'/b'x'/b'' flags.

        `buildfallback` supplies a manifest-based flag source, used when the
        filesystem cannot express symlinks and/or the exec bit.
        """
        if self._checklink and self._checkexec:
            # the filesystem can answer both questions directly

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks are real; exec bit comes from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit is real; symlinks come from the fallback

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither is supported: everything comes from the fallback
            return fallback
280 280
281 281 @propertycache
282 282 def _cwd(self):
283 283 # internal config: ui.forcecwd
284 284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 285 if forcecwd:
286 286 return forcecwd
287 287 return encoding.getcwd()
288 288
289 289 def getcwd(self):
290 290 """Return the path from which a canonical path is calculated.
291 291
292 292 This path should be used to resolve file patterns or to convert
293 293 canonical paths back to file paths for display. It shouldn't be
294 294 used to get real file paths. Use vfs functions instead.
295 295 """
296 296 cwd = self._cwd
297 297 if cwd == self._root:
298 298 return b''
299 299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 300 rootsep = self._root
301 301 if not util.endswithsep(rootsep):
302 302 rootsep += pycompat.ossep
303 303 if cwd.startswith(rootsep):
304 304 return cwd[len(rootsep) :]
305 305 else:
306 306 # we're outside the repo. return an absolute path.
307 307 return cwd
308 308
309 309 def pathto(self, f, cwd=None):
310 310 if cwd is None:
311 311 cwd = self.getcwd()
312 312 path = util.pathto(self._root, cwd, f)
313 313 if self._slash:
314 314 return util.pconvert(path)
315 315 return path
316 316
317 317 def __getitem__(self, key):
318 318 """Return the current state of key (a filename) in the dirstate.
319 319
320 320 States are:
321 321 n normal
322 322 m needs merging
323 323 r marked for removal
324 324 a marked for addition
325 325 ? not tracked
326 326
327 327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 328 consider migrating all user of this to going through the dirstate entry
329 329 instead.
330 330 """
331 331 entry = self._map.get(key)
332 332 if entry is not None:
333 333 return entry.state
334 334 return b'?'
335 335
    def __contains__(self, key):
        # membership delegates to the underlying map
        return key in self._map

    def __iter__(self):
        # iterate filenames in sorted order for deterministic output
        return iter(sorted(self._map))

    def items(self):
        return pycompat.iteritems(self._map)

    # Python 2 spelling kept as an alias
    iteritems = items

    def directories(self):
        return self._map.directories()

    def parents(self):
        # validated (p1, p2) nodeids of the working copy
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        return encoding.tolocal(self._branch)
366 366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries a
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents so write() can notify callbacks
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # we are leaving a merge: only files flagged non-normal or
            # other-parent can carry merge state that needs adjusting
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414 414
    def setbranch(self, branch):
        """Persist `branch` to `.hg/branch` and refresh the filecache."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # discard the partially-written temp file before propagating
            f.discard()
            raise
430 430
431 431 def invalidate(self):
432 432 """Causes the next access to reread the dirstate.
433 433
434 434 This is different from localrepo.invalidatedirstate() because it always
435 435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 436 check whether the dirstate has changed before rereading it."""
437 437
438 438 for a in ("_map", "_branch", "_ignore"):
439 439 if a in self.__dict__:
440 440 delattr(self, a)
441 441 self._lastnormaltime = 0
442 442 self._dirty = False
443 443 self._updatedfiles.clear()
444 444 self._parentwriters = 0
445 445 self._origpl = None
446 446
447 447 def copy(self, source, dest):
448 448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 449 if source == dest:
450 450 return
451 451 self._dirty = True
452 452 if source is not None:
453 453 self._map.copymap[dest] = source
454 454 self._updatedfiles.add(source)
455 455 self._updatedfiles.add(dest)
456 456 elif self._map.copymap.pop(dest, None):
457 457 self._updatedfiles.add(dest)
458 458
459 459 def copied(self, file):
460 460 return self._map.copymap.get(file, None)
461 461
462 462 def copies(self):
463 463 return self._map.copymap
464 464
465 465 @requires_no_parents_change
466 466 def set_tracked(self, filename):
467 467 """a "public" method for generic code to mark a file as tracked
468 468
469 469 This function is to be called outside of "update/merge" case. For
470 470 example by a command like `hg add X`.
471 471
472 472 return True the file was previously untracked, False otherwise.
473 473 """
474 474 entry = self._map.get(filename)
475 475 if entry is None:
476 476 self._add(filename)
477 477 return True
478 478 elif not entry.tracked:
479 479 self.normallookup(filename)
480 480 return True
481 481 return False
482 482
483 483 @requires_no_parents_change
484 484 def set_untracked(self, filename):
485 485 """a "public" method for generic code to mark a file as untracked
486 486
487 487 This function is to be called outside of "update/merge" case. For
488 488 example by a command like `hg remove X`.
489 489
490 490 return True the file was previously tracked, False otherwise.
491 491 """
492 492 entry = self._map.get(filename)
493 493 if entry is None:
494 494 return False
495 495 elif entry.added:
496 496 self._drop(filename)
497 497 return True
498 498 else:
499 499 self._remove(filename)
500 500 return True
501 501
    @requires_parents_change
    def update_file_reference(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            self.normallookup(filename)
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            # `entry` cannot be None here: wc_tracked implies a map entry
            if not entry.added:
                self._add(filename)
        elif p1_tracked and not wc_tracked:
            if entry is None or not entry.removed:
                self._remove(filename)
        else:
            assert False, 'unreachable'
539 539
540 540 @requires_parents_change
541 541 def update_file(
542 542 self,
543 543 filename,
544 544 wc_tracked,
545 545 p1_tracked,
546 546 p2_tracked=False,
547 547 merged=False,
548 548 clean_p1=False,
549 549 clean_p2=False,
550 550 possibly_dirty=False,
551 551 ):
552 552 """update the information about a file in the dirstate
553 553
554 554 This is to be called when the direstates parent changes to keep track
555 555 of what is the file situation in regards to the working copy and its parent.
556 556
557 557 This function must be called within a `dirstate.parentchange` context.
558 558
559 559 note: the API is at an early stage and we might need to ajust it
560 560 depending of what information ends up being relevant and useful to
561 561 other processing.
562 562 """
563 563 if not self.pendingparentchange():
564 564 msg = b'calling `update_file` outside of a parentchange context'
565 565 raise error.ProgrammingError(msg)
566 566 if merged and (clean_p1 or clean_p2):
567 567 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
568 568 raise error.ProgrammingError(msg)
569 569 assert not (merged and (clean_p1 or clean_p1))
570 570 if not (p1_tracked or p2_tracked or wc_tracked):
571 571 self._drop(filename)
572 572 elif merged:
573 573 assert wc_tracked
574 574 if not self.in_merge:
575 575 self.normallookup(filename)
576 576 self.otherparent(filename)
577 577 elif not (p1_tracked or p2_tracked) and wc_tracked:
578 578 self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
579 579 self._map.copymap.pop(filename, None)
580 580 elif (p1_tracked or p2_tracked) and not wc_tracked:
581 581 self._remove(filename)
582 582 elif clean_p2 and wc_tracked:
583 583 assert p2_tracked
584 584 self.otherparent(filename)
585 585 elif not p1_tracked and p2_tracked and wc_tracked:
586 586 self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
587 587 self._map.copymap.pop(filename, None)
588 588 elif possibly_dirty:
589 589 self._addpath(filename, possibly_dirty=possibly_dirty)
590 590 elif wc_tracked:
591 591 self.normal(filename)
592 592 # XXX We need something for file that are dirty after an update
593 593 else:
594 594 assert False, 'unreachable'
595 595
    @requires_parents_change
    def update_parent_file_data(self, f, filedata):
        """update the information about the content of a file

        This function should be called within a `dirstate.parentchange` context.
        """
        # filedata is the (mode, size, mtime) triple of the clean file
        self.normal(f, parentfiledata=filedata)
603 603
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """Low-level helper adding/updating `f` in the dirstate map.

        The filename is only validated when the file is (re)entering the
        tracked set (newly added, or previously removed); otherwise the
        entry is simply refreshed.
        """
        entry = self._map.get(f)
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
643 643
    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # stat the file ourselves; racier than receiving cached stat data
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
670 670
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already carries the right merge state, nothing to do
                    return
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)

    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if not self.in_merge:
            msg = _(b"setting %r to other parent only allowed in merges") % f
            raise error.Abort(msg)
        entry = self._map.get(f)
        if entry is not None and entry.tracked:
            # merge-like
            self._addpath(f, merged=True)
        else:
            # add-like
            self._addpath(f, from_p2=True)
        self._map.copymap.pop(f, None)
708 708
    def add(self, f):
        '''Mark a file added.'''
        if not self.pendingparentchange():
            # deprecated entry point when used outside update/merge;
            # scheduled for removal in 6.0
            util.nouideprecwarn(
                b"do not use `add` outside of update/merge context."
                b" Use `set_tracked`",
                b'6.0',
                stacklevel=2,
            )
        self._add(f)

    def _add(self, filename):
        """internal function to mark a file as added"""
        self._addpath(filename, added=True)
        # an added file cannot be a copy target anymore
        self._map.copymap.pop(filename, None)

    def remove(self, f):
        '''Mark a file removed'''
        if not self.pendingparentchange():
            # deprecated entry point when used outside update/merge;
            # scheduled for removal in 6.0
            util.nouideprecwarn(
                b"do not use `remove` outside of update/merge context."
                b" Use `set_untracked`",
                b'6.0',
                stacklevel=2,
            )
        self._remove(f)
728 735
729 736 def _remove(self, filename):
730 737 """internal function to mark a file removed"""
731 738 self._dirty = True
732 739 self._updatedfiles.add(filename)
733 740 self._map.removefile(filename, in_merge=self.in_merge)
734 741
735 742 def merge(self, f):
736 743 '''Mark a file merged.'''
737 744 if not self.in_merge:
738 745 return self.normallookup(f)
739 746 return self.otherparent(f)
740 747
741 748 def drop(self, f):
742 749 '''Drop a file from the dirstate'''
743 750 self._drop(f)
744 751
745 752 def _drop(self, filename):
746 753 """internal function to drop a file from the dirstate"""
747 754 if self._map.dropfile(filename):
748 755 self._dirty = True
749 756 self._updatedfiles.add(filename)
750 757 self._map.copymap.pop(filename, None)
751 758
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Find the on-disk case of `path` and cache it in `storemap`.

        `normed` is the case-normalized form of `path`; `exists` may be
        supplied by the caller to skip an extra lexists() check.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded

        return folded
777 784
778 785 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
779 786 normed = util.normcase(path)
780 787 folded = self._map.filefoldmap.get(normed, None)
781 788 if folded is None:
782 789 if isknown:
783 790 folded = path
784 791 else:
785 792 folded = self._discoverpath(
786 793 path, normed, ignoremissing, exists, self._map.filefoldmap
787 794 )
788 795 return folded
789 796
790 797 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
791 798 normed = util.normcase(path)
792 799 folded = self._map.filefoldmap.get(normed, None)
793 800 if folded is None:
794 801 folded = self._map.dirfoldmap.get(normed, None)
795 802 if folded is None:
796 803 if isknown:
797 804 folded = path
798 805 else:
799 806 # store discovered result in dirfoldmap so that future
800 807 # normalizefile calls don't start matching directories
801 808 folded = self._discoverpath(
802 809 path, normed, ignoremissing, exists, self._map.dirfoldmap
803 810 )
804 811 return folded
805 812
806 813 def normalize(self, path, isknown=False, ignoremissing=False):
807 814 """
808 815 normalize the case of a pathname when on a casefolding filesystem
809 816
810 817 isknown specifies whether the filename came from walking the
811 818 disk, to avoid extra filesystem access.
812 819
813 820 If ignoremissing is True, missing path are returned
814 821 unchanged. Otherwise, we try harder to normalize possibly
815 822 existing path components.
816 823
817 824 The normalized case is determined based on the following precedence:
818 825
819 826 - version of name already stored in the dirstate
820 827 - version of name stored on disk
821 828 - version provided via command arguments
822 829 """
823 830
824 831 if self._checkcase:
825 832 return self._normalize(path, isknown, ignoremissing)
826 833 return path
827 834
828 835 def clear(self):
829 836 self._map.clear()
830 837 self._lastnormaltime = 0
831 838 self._updatedfiles.clear()
832 839 self._dirty = True
833 840
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to describe `parent` for `allfiles`.

        When `changedfiles` is given only those entries are refreshed;
        otherwise the whole dirstate is rebuilt from scratch.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the rebuild
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True
867 874
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
875 882
    def write(self, tr):
        """Write in-memory changes out, via transaction `tr` when given."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        # no transaction: write synchronously and atomically
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
907 914
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
918 925
    def _writedirstate(self, st):
        """Serialize the dirstate to the open file object `st`."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
952 959
953 960 def _dirignore(self, f):
954 961 if self._ignore(f):
955 962 return True
956 963 for p in pathutil.finddirs(f):
957 964 if self._ignore(p):
958 965 return True
959 966 return False
960 967
961 968 def _ignorefiles(self):
962 969 files = []
963 970 if os.path.exists(self._join(b'.hgignore')):
964 971 files.append(self._join(b'.hgignore'))
965 972 for name, path in self._ui.configitems(b"ui"):
966 973 if name == b'ignore' or name.startswith(b'ignore.'):
967 974 # we need to use os.path.join here rather than self._join
968 975 # because path is arbitrary and user-specified
969 976 files.append(os.path.join(self._rootdir, util.expandpath(path)))
970 977 return files
971 978
972 979 def _ignorefileandline(self, f):
973 980 files = collections.deque(self._ignorefiles())
974 981 visited = set()
975 982 while files:
976 983 i = files.popleft()
977 984 patterns = matchmod.readpatternfile(
978 985 i, self._ui.warn, sourceinfo=True
979 986 )
980 987 for pattern, lineno, line in patterns:
981 988 kind, p = matchmod._patsplit(pattern, b'glob')
982 989 if kind == b"subinclude":
983 990 if p not in visited:
984 991 files.append(p)
985 992 continue
986 993 m = matchmod.match(
987 994 self._root, b'', [], [pattern], warn=self._ui.warn
988 995 )
989 996 if m(f):
990 997 return (i, lineno, line)
991 998 visited.add(i)
992 999 return (None, -1, b"")
993 1000
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # translate an unsupported stat mode into a human-readable message
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently used names to locals for the loops below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo; both lists are
        # sorted so one merge-style pass is enough
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # sentinel None entries stop deeper walks into subrepos and .hg
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group the found files by their case-normalized form
            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            # for ambiguous groups, keep only the spelling that matches the
            # on-disk case; the others are demoted to None (missing)
            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1128 1135
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        The walk proceeds in three steps: (1) stat the explicitly matched
        files, (2) recurse into the matched directories, and (3) account for
        any dirstate entries not seen on disk during steps 1-2.
        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # pick the ignore predicates according to what the caller wants listed
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # bind hot attributes to locals for the traversal loops below
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # iterative depth-first traversal; `work` is the stack of
            # directories still to visit
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # tracked on record but not a regular file/symlink
                            # on disk: report as missing
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # drop the sentinels installed by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1316 1323
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status through the Rust implementation.

        Returns the same ``(lookup, status)`` pair as ``status()``.  May
        raise ``rustmod.FallbackError`` when the Rust path cannot handle
        the request, in which case the caller falls back to the pure-Python
        walk.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        # NOTE: the tuple order here must match what rustmod.status returns
        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust call can flag the dirstate as needing a write-out
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file, syntax) pair: a pattern file with a bad line
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: a pattern file that could not be read
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1395 1402
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # the `ignored`/`clean`/`unknown` parameters are booleans selecting
        # what to list; the local names below are the result accumulators
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # fall through to the pure-Python implementation below
                pass

        def noop(f):
            # used in place of list.append when a category isn't requested
            pass

        # bind hot lookups/appends to locals for the walk loop below
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file on disk but not in the dirstate: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                # tracked but no stat result: gone from disk
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    # same size, different mtime: content must be read to tell
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1536 1543
1537 1544 def matches(self, match):
1538 1545 """
1539 1546 return files in the dirstate (in whatever state) filtered by match
1540 1547 """
1541 1548 dmap = self._map
1542 1549 if rustmod is not None:
1543 1550 dmap = self._map._rustmap
1544 1551
1545 1552 if match.always():
1546 1553 return dmap.keys()
1547 1554 files = match.files()
1548 1555 if match.isexact():
1549 1556 # fast path -- filter the other way around, since typically files is
1550 1557 # much smaller than dmap
1551 1558 return [f for f in files if f in dmap]
1552 1559 if match.prefix() and all(fn in dmap for fn in files):
1553 1560 # fast path -- all the values are known to be files, so just return
1554 1561 # that
1555 1562 return list(files)
1556 1563 return [f for f in dmap if match(f)]
1557 1564
1558 1565 def _actualfilename(self, tr):
1559 1566 if tr:
1560 1567 return self._pendingfilename
1561 1568 else:
1562 1569 return self._filename
1563 1570
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        Writes out any pending in-memory changes first so the backup
        reflects the current state, then hardlink-copies the dirstate file
        to ``backupname``.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1601 1608
1602 1609 def restorebackup(self, tr, backupname):
1603 1610 '''Restore dirstate by backup file'''
1604 1611 # this "invalidate()" prevents "wlock.release()" from writing
1605 1612 # changes of dirstate out after restoring from backup file
1606 1613 self.invalidate()
1607 1614 filename = self._actualfilename(tr)
1608 1615 o = self._opener
1609 1616 if util.samefile(o.join(backupname), o.join(filename)):
1610 1617 o.unlink(backupname)
1611 1618 else:
1612 1619 o.rename(backupname, filename, checkambig=True)
1613 1620
1614 1621 def clearbackup(self, tr, backupname):
1615 1622 '''Clear backup file'''
1616 1623 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now