##// END OF EJS Templates
dirstate: stop using `oldstate` in `dirstate._addpath`...
marmoute -
r48312:b76d54b9 default
parent child Browse files
Show More
@@ -1,1436 +1,1437 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
# C- or Rust-accelerated implementations, when available.
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# The dirstate-v2 on-disk format is only implemented by the Rust extensions.
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# Local aliases for frequently used helpers.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

dirstatetuple = parsers.dirstatetuple
49 49
50 50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg/ opener
        return obj._opener.join(fname)
56 56
57 57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the working directory root
        return obj._join(fname)
63 63
64 64
def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    # Create a scratch file through the vfs and read its mtime back: this
    # yields "now" as the *filesystem* sees it, which may differ from the
    # system clock.
    fd, scratch = vfs.mkstemp()
    try:
        st = os.fstat(fd)
        return st[stat.ST_MTIME]
    finally:
        os.close(fd)
        vfs.unlink(scratch)
73 73
74 74
75 75 @interfaceutil.implementer(intdirstate.idirstate)
76 76 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of nested parentchange() context managers (see parentchange)
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        # files touched since the last write; used to drop ambiguous mtimes
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
117 117
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching the property is enough to populate the cache
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0
147 147
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # note: the assignment below pre-empts the propertycache mechanism;
        # both end up storing the instance in self.__dict__['_map']
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
172 172
    @repocache(b'branch')
    def _branch(self):
        """Current branch name (bytes), read from .hg/branch."""
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            # a missing branch file simply means the default branch
            if inst.errno != errno.ENOENT:
                raise
            return b"default"

    @property
    def _pl(self):
        # (p1, p2) working directory parent nodes, straight from the map
        return self._map.parents()

    def hasdir(self, d):
        # True if d is a directory containing tracked files
        return self._map.hastrackeddir(d)
188 188
    @rootcache(b'.hgignore')
    def _ignore(self):
        """Matcher built from all configured ignore files."""
        files = self._ignorefiles()
        if not files:
            # nothing configured: match nothing
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' despite a native
        # separator that differs (e.g. on Windows with ui.slash=True)
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # does the working directory's filesystem support symlinks?
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # does the working directory's filesystem support the exec bit?
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
218 218
    def flagfunc(self, buildfallback):
        """Return a callable mapping a path to its flags (b'l', b'x' or b'').

        When the filesystem cannot express symlinks and/or the exec bit,
        the callable built by ``buildfallback()`` supplies the missing
        information.
        """
        if self._checklink and self._checkexec:
            # filesystem fully capable: read flags straight from lstat

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks supported, exec bit is not: ask the fallback for 'x'

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit supported, symlinks are not: ask the fallback for 'l'

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither supported: everything comes from the fallback
            return fallback
258 258
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()

    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            # at the repository root: canonical paths need no prefix
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # inside the repo: return the path relative to the root
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        """Return repo-file f as a path usable/displayable from cwd."""
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path
294 294
    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
        n normal
        m needs merging
        r marked for removal
        a marked for addition
        ? not tracked

        XXX The "state" is a bit obscure to be in the "public" API. we should
        consider migrating all user of this to going through the dirstate entry
        instead.
        """
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        # untracked file
        return b'?'

    def __contains__(self, key):
        # True if the file is known to the dirstate (in any state)
        return key in self._map

    def __iter__(self):
        # iterate tracked filenames in sorted order
        return iter(sorted(self._map))

    def items(self):
        # iterator of (filename, entry) pairs
        return pycompat.iteritems(self._map)

    # Python 2 spelling kept as an alias.
    iteritems = items

    def directories(self):
        return self._map.directories()

    def parents(self):
        # both working directory parents, run through the validate callback
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        # branch name converted to the local encoding for display
        return encoding.tolocal(self._branch)
344 344
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for the change callbacks
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # dropping the second parent: merge-related states must be
            # rewritten in terms of the single remaining parent
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.add(f)
        return copies
392 392
    def setbranch(self, branch):
        """Persist the working directory branch name to .hg/branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise
408 408
409 409 def invalidate(self):
410 410 """Causes the next access to reread the dirstate.
411 411
412 412 This is different from localrepo.invalidatedirstate() because it always
413 413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
414 414 check whether the dirstate has changed before rereading it."""
415 415
416 416 for a in ("_map", "_branch", "_ignore"):
417 417 if a in self.__dict__:
418 418 delattr(self, a)
419 419 self._lastnormaltime = 0
420 420 self._dirty = False
421 421 self._updatedfiles.clear()
422 422 self._parentwriters = 0
423 423 self._origpl = None
424 424
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            # self-copies are meaningless; ignore
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            # only mark dest updated if there actually was a record to drop
            self._updatedfiles.add(dest)

    def copied(self, file):
        """Return the copy source of file, or None if it is not a copy."""
        return self._map.copymap.get(file, None)

    def copies(self):
        """Return the full {dest: source} copy map."""
        return self._map.copymap
442 442
    def _addpath(
        self,
        f,
        state,
        mode,
        size=None,
        mtime=None,
        from_p2=False,
        possibly_dirty=False,
    ):
        """Common helper to record f in the dirstate with the given state.

        Validates the filename and checks for file/directory clashes when f
        is newly (re)added, then delegates the actual record to the map.
        """
        oldstate = self[f]
        entry = self._map.get(f)
        # only validate when f becomes tracked again: explicitly added, or
        # previously marked removed
        if state == b'a' or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                # f would shadow a tracked directory of the same name
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    # an ancestor path of f is tracked as a *file*
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            oldstate,
            state=state,
            mode=mode,
            size=size,
            mtime=mtime,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
481 482
    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # no cached data: stat the file on disk now
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        self._addpath(f, b'n', mode, size, mtime)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
508 509
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in a merge-related state: nothing to do
                    return
        # record as normal but force a later content comparison
        self._addpath(f, b'n', 0, possibly_dirty=True)
        self._map.copymap.pop(f, None)
532 533
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if not self.in_merge:
            msg = _(b"setting %r to other parent only allowed in merges") % f
            raise error.Abort(msg)
        if f in self and self[f] == b'n':
            # merge-like: the file was already tracked as normal
            self._addpath(f, b'm', 0, from_p2=True)
        else:
            # add-like
            self._addpath(f, b'n', 0, from_p2=True)
        self._map.copymap.pop(f, None)

    def add(self, f):
        '''Mark a file added.'''
        self._addpath(f, b'a', 0)
        # an added file cannot carry a stale copy record
        self._map.copymap.pop(f, None)
550 551
    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.removefile(f, in_merge=self.in_merge)

    def merge(self, f):
        '''Mark a file merged.'''
        if not self.in_merge:
            # outside a merge this degenerates to "normal but check content"
            return self.normallookup(f)
        return self.otherparent(f)

    def drop(self, f):
        '''Drop a file from the dirstate'''
        oldstate = self[f]
        if self._map.dropfile(f, oldstate):
            self._dirty = True
            self._updatedfiles.add(f)
            self._map.copymap.pop(f, None)
570 571
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Find the case-correct on-disk spelling of path and cache it.

        normed is the case-normalized form of path; the discovered folding
        is stored into storemap (a fold map) before being returned.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that actually exist on disk
            storemap[normed] = folded

        return folded
596 597
    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        """Normalize the case of a file path against the file fold map."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                # path came from walking the disk; trust its case
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Normalize case against the file fold map, then the dir fold map."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
624 625
625 626 def normalize(self, path, isknown=False, ignoremissing=False):
626 627 """
627 628 normalize the case of a pathname when on a casefolding filesystem
628 629
629 630 isknown specifies whether the filename came from walking the
630 631 disk, to avoid extra filesystem access.
631 632
632 633 If ignoremissing is True, missing path are returned
633 634 unchanged. Otherwise, we try harder to normalize possibly
634 635 existing path components.
635 636
636 637 The normalized case is determined based on the following precedence:
637 638
638 639 - version of name already stored in the dirstate
639 640 - version of name stored on disk
640 641 - version provided via command arguments
641 642 """
642 643
643 644 if self._checkcase:
644 645 return self._normalize(path, isknown, ignoremissing)
645 646 return path
646 647
647 648 def clear(self):
648 649 self._map.clear()
649 650 self._lastnormaltime = 0
650 651 self._updatedfiles.clear()
651 652 self._dirty = True
652 653
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild the dirstate to track allfiles under the given parent.

        If changedfiles is given, only those entries are refreshed
        (looked up when present in allfiles, dropped otherwise).
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the rebuild
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self.drop(f)

        self._dirty = True
686 687
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity

    def write(self, tr):
        """Write pending changes to disk, or schedule them on transaction tr."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        # no transaction: write immediately and atomically
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
726 727
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

    def _writedirstate(self, st):
        """Serialize the dirstate map into the open file-like object st."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            # sorted for deterministic callback order
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.state == b'n' and e[3] == now:
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
771 772
772 773 def _dirignore(self, f):
773 774 if self._ignore(f):
774 775 return True
775 776 for p in pathutil.finddirs(f):
776 777 if self._ignore(p):
777 778 return True
778 779 return False
779 780
    def _ignorefiles(self):
        """Return the list of ignore-file paths to consult."""
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        # additional files configured via ui.ignore / ui.ignore.<name>
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files
790 791
    def _ignorefileandline(self, f):
        """Return (file, lineno, line) of the first ignore rule matching f.

        Returns (None, -1, b"") when no rule matches. Subincluded pattern
        files are followed breadth-first, each visited at most once.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced pattern file instead of matching
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
812 813
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for a non-file, non-symlink entry
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # local aliases to avoid repeated attribute lookups in the loop
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop any explicit files that belong to a subrepo; both lists are
        # sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename.  Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group the found files by their case-normalized form
            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
947 948
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Select the ignore predicates according to what the caller wants
        # listed: listing ignored files means nothing is treated as ignored.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Hoist frequently-used attributes and functions into locals: the
        # traversal loop below is one of the hottest paths in Mercurial.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        # skipstep3 is set when steps 1+2 are known to have already visited
        # every dirstate entry the matcher can select.
        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            # case-insensitive filesystem: paths must be case-normalized
            # while walking, so step 3 cannot be skipped
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # Depth-first walk of the directories in `work`, stat-ing each
            # entry and recording matches into `results`.
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # unreadable or vanished directory: report via the
                    # matcher's bad() callback rather than aborting the walk
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            # a dirstate entry with the name of a directory
                            # on disk is reported as missing (value None)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # tracked name exists on disk with an unexpected
                            # kind (e.g. fifo/device): report as missing
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # subrepo roots and the .hg directory itself are never part of the
        # reported working-copy contents
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1135 1136
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status through the Rust extension.

        Returns the same ``(lookup, status)`` pair as ``status()``.  May
        raise ``rustmod.FallbackError`` (caught by the caller, which then
        falls back to the pure-Python walk).
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        # NOTE: the order of the unpacked names below must stay in sync with
        # the tuple produced by the Rust implementation of `status`.
        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust side may have mutated/normalized the map on the way
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax) pair: invalid ignore-pattern syntax
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: an ignore file that could not be read
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        # forward per-file errors to the matcher's bad() callback
        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1214 1215
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # rebind the boolean arguments; their original names are reused
        # below for the result lists
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # try the fast Rust implementation first, unless some feature it
        # does not support is in play
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        # bind the per-category append methods once; hot loop below
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file on disk but not in the dirstate: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            state = t.state
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in b"nma":
                # tracked (or added/merged) but gone from disk -> deleted
                dadd(fn)
            elif state == b'n':
                # "normal" entry: compare the recorded stat data to disk
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    # same size but different mtime: content must be read
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
                elif t.merged:
                    madd(fn)
            elif state == b'a':
                aadd(fn)
            elif t.removed:
                radd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1356 1357
1357 1358 def matches(self, match):
1358 1359 """
1359 1360 return files in the dirstate (in whatever state) filtered by match
1360 1361 """
1361 1362 dmap = self._map
1362 1363 if rustmod is not None:
1363 1364 dmap = self._map._rustmap
1364 1365
1365 1366 if match.always():
1366 1367 return dmap.keys()
1367 1368 files = match.files()
1368 1369 if match.isexact():
1369 1370 # fast path -- filter the other way around, since typically files is
1370 1371 # much smaller than dmap
1371 1372 return [f for f in files if f in dmap]
1372 1373 if match.prefix() and all(fn in dmap for fn in files):
1373 1374 # fast path -- all the values are known to be files, so just return
1374 1375 # that
1375 1376 return list(files)
1376 1377 return [f for f in dmap if match(f)]
1377 1378
1378 1379 def _actualfilename(self, tr):
1379 1380 if tr:
1380 1381 return self._pendingfilename
1381 1382 else:
1382 1383 return self._filename
1383 1384
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        # inside a transaction this is the pending file (see _actualfilename)
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # remove any stale backup before creating the new one
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1421 1422
1422 1423 def restorebackup(self, tr, backupname):
1423 1424 '''Restore dirstate by backup file'''
1424 1425 # this "invalidate()" prevents "wlock.release()" from writing
1425 1426 # changes of dirstate out after restoring from backup file
1426 1427 self.invalidate()
1427 1428 filename = self._actualfilename(tr)
1428 1429 o = self._opener
1429 1430 if util.samefile(o.join(backupname), o.join(filename)):
1430 1431 o.unlink(backupname)
1431 1432 else:
1432 1433 o.rename(backupname, filename, checkambig=True)
1433 1434
1434 1435 def clearbackup(self, tr, backupname):
1435 1436 '''Clear backup file'''
1436 1437 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now