##// END OF EJS Templates
dirstate: introduce an internal `_add` method...
marmoute -
r48389:f5c24c12 default
parent child Browse files
Show More
@@ -1,1436 +1,1440 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43 43
44 44 propertycache = util.propertycache
45 45 filecache = scmutil.filecache
46 46 _rangemask = dirstatemap.rangemask
47 47
48 48 DirstateItem = parsers.DirstateItem
49 49
50 50
class repocache(filecache):
    """A filecache variant whose files live under .hg/.

    Paths are resolved through the dirstate's opener rather than the
    repository root.
    """

    def join(self, obj, fname):
        opener = obj._opener
        return opener.join(fname)
56 56
57 57
class rootcache(filecache):
    """A filecache variant whose files live in the repository root.

    Paths are resolved relative to the working-directory root via the
    dirstate's _join helper.
    """

    def join(self, obj, fname):
        join_root = obj._join
        return join_root(fname)
63 63
64 64
def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    # create (and immediately remove) a scratch file so the timestamp
    # reflects the filesystem's own clock, not the local process clock
    fd, scratch = vfs.mkstemp()
    try:
        return os.fstat(fd)[stat.ST_MTIME]
    finally:
        os.close(fd)
        vfs.unlink(scratch)
73 73
74 74
75 75 @interfaceutil.implementer(intdirstate.idirstate)
76 76 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state differs from what is on disk
        self._dirty = False
        # mtime of the most recently seen clean file; consulted by status()
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # nesting depth of active parentchange() context managers
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        # category -> callback, fired when the working dir parents change
        self._plchangecallbacks = {}
        # parents as of the start of the current batch of changes (None
        # until a change is made)
        self._origpl = None
        # files touched since the last write; used to clear ambiguous mtimes
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
117 117
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # merely reading the property is enough to populate the map
        self._pl
124 124
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
141 141
142 142 def pendingparentchange(self):
143 143 """Returns true if the dirstate is in the middle of a set of changes
144 144 that modify the dirstate parent.
145 145 """
146 146 return self._parentwriters > 0
147 147
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # NOTE(review): assigning to self._map before returning looks like it
        # short-circuits re-entrant property access during construction —
        # TODO confirm; the propertycache decorator would otherwise write the
        # attribute only after this function returns.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
159 159
160 160 @property
161 161 def _sparsematcher(self):
162 162 """The matcher for the sparse checkout.
163 163
164 164 The working directory may not include every file from a manifest. The
165 165 matcher obtained by this property will match a path if it is to be
166 166 included in the working directory.
167 167 """
168 168 # TODO there is potential to cache this property. For now, the matcher
169 169 # is resolved on every access. (But the called function does use a
170 170 # cache to keep the lookup fast.)
171 171 return self._sparsematchfn()
172 172
173 173 @repocache(b'branch')
174 174 def _branch(self):
175 175 try:
176 176 return self._opener.read(b"branch").strip() or b"default"
177 177 except IOError as inst:
178 178 if inst.errno != errno.ENOENT:
179 179 raise
180 180 return b"default"
181 181
182 182 @property
183 183 def _pl(self):
184 184 return self._map.parents()
185 185
186 186 def hasdir(self, d):
187 187 return self._map.hastrackeddir(d)
188 188
189 189 @rootcache(b'.hgignore')
190 190 def _ignore(self):
191 191 files = self._ignorefiles()
192 192 if not files:
193 193 return matchmod.never()
194 194
195 195 pats = [b'include:%s' % f for f in files]
196 196 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
197 197
198 198 @propertycache
199 199 def _slash(self):
200 200 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
201 201
202 202 @propertycache
203 203 def _checklink(self):
204 204 return util.checklink(self._root)
205 205
206 206 @propertycache
207 207 def _checkexec(self):
208 208 return bool(util.checkexec(self._root))
209 209
210 210 @propertycache
211 211 def _checkcase(self):
212 212 return not util.fscasesensitive(self._join(b'.hg'))
213 213
214 214 def _join(self, f):
215 215 # much faster than os.path.join()
216 216 # it's safe because f is always a relative path
217 217 return self._rootdir + f
218 218
    def flagfunc(self, buildfallback):
        """Return a callable mapping a path to its flags (b'l', b'x' or b'').

        buildfallback is only called (lazily) when the filesystem cannot
        answer for symlinks and/or the exec bit by itself.
        """
        if self._checklink and self._checkexec:
            # filesystem answers both questions: one lstat() per path
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks are real; exec bit comes from the fallback
            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit is real; symlink flag comes from the fallback
            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither is reliable: delegate entirely to the fallback
            return fallback
258 258
259 259 @propertycache
260 260 def _cwd(self):
261 261 # internal config: ui.forcecwd
262 262 forcecwd = self._ui.config(b'ui', b'forcecwd')
263 263 if forcecwd:
264 264 return forcecwd
265 265 return encoding.getcwd()
266 266
267 267 def getcwd(self):
268 268 """Return the path from which a canonical path is calculated.
269 269
270 270 This path should be used to resolve file patterns or to convert
271 271 canonical paths back to file paths for display. It shouldn't be
272 272 used to get real file paths. Use vfs functions instead.
273 273 """
274 274 cwd = self._cwd
275 275 if cwd == self._root:
276 276 return b''
277 277 # self._root ends with a path separator if self._root is '/' or 'C:\'
278 278 rootsep = self._root
279 279 if not util.endswithsep(rootsep):
280 280 rootsep += pycompat.ossep
281 281 if cwd.startswith(rootsep):
282 282 return cwd[len(rootsep) :]
283 283 else:
284 284 # we're outside the repo. return an absolute path.
285 285 return cwd
286 286
287 287 def pathto(self, f, cwd=None):
288 288 if cwd is None:
289 289 cwd = self.getcwd()
290 290 path = util.pathto(self._root, cwd, f)
291 291 if self._slash:
292 292 return util.pconvert(path)
293 293 return path
294 294
295 295 def __getitem__(self, key):
296 296 """Return the current state of key (a filename) in the dirstate.
297 297
298 298 States are:
299 299 n normal
300 300 m needs merging
301 301 r marked for removal
302 302 a marked for addition
303 303 ? not tracked
304 304
305 305 XXX The "state" is a bit obscure to be in the "public" API. we should
306 306 consider migrating all user of this to going through the dirstate entry
307 307 instead.
308 308 """
309 309 entry = self._map.get(key)
310 310 if entry is not None:
311 311 return entry.state
312 312 return b'?'
313 313
314 314 def __contains__(self, key):
315 315 return key in self._map
316 316
317 317 def __iter__(self):
318 318 return iter(sorted(self._map))
319 319
320 320 def items(self):
321 321 return pycompat.iteritems(self._map)
322 322
323 323 iteritems = items
324 324
325 325 def directories(self):
326 326 return self._map.directories()
327 327
328 328 def parents(self):
329 329 return [self._validate(p) for p in self._pl]
330 330
331 331 def p1(self):
332 332 return self._validate(self._pl[0])
333 333
334 334 def p2(self):
335 335 return self._validate(self._pl[1])
336 336
337 337 @property
338 338 def in_merge(self):
339 339 """True if a merge is in progress"""
340 340 return self._pl[1] != self._nodeconstants.nullid
341 341
342 342 def branch(self):
343 343 return encoding.tolocal(self._branch)
344 344
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries a
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        # detect the two-parents -> one-parent transition: entries that only
        # make sense during a merge need to be normalized
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
392 392
    def setbranch(self, branch):
        """Persist *branch* (given in local encoding) as the current branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            # discard the partially-written temporary file before re-raising
            f.discard()
            raise
408 408
409 409 def invalidate(self):
410 410 """Causes the next access to reread the dirstate.
411 411
412 412 This is different from localrepo.invalidatedirstate() because it always
413 413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
414 414 check whether the dirstate has changed before rereading it."""
415 415
416 416 for a in ("_map", "_branch", "_ignore"):
417 417 if a in self.__dict__:
418 418 delattr(self, a)
419 419 self._lastnormaltime = 0
420 420 self._dirty = False
421 421 self._updatedfiles.clear()
422 422 self._parentwriters = 0
423 423 self._origpl = None
424 424
425 425 def copy(self, source, dest):
426 426 """Mark dest as a copy of source. Unmark dest if source is None."""
427 427 if source == dest:
428 428 return
429 429 self._dirty = True
430 430 if source is not None:
431 431 self._map.copymap[dest] = source
432 432 self._updatedfiles.add(source)
433 433 self._updatedfiles.add(dest)
434 434 elif self._map.copymap.pop(dest, None):
435 435 self._updatedfiles.add(dest)
436 436
437 437 def copied(self, file):
438 438 return self._map.copymap.get(file, None)
439 439
440 440 def copies(self):
441 441 return self._map.copymap
442 442
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """Record *f* in the dirstate map, validating the path first.

        The keyword flags are forwarded unchanged to the map's addfile();
        they describe which state the entry should end up in.
        """
        entry = self._map.get(f)
        # only paths that (re)enter the tracked set need validation
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
482 482
    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # no cached data: stat the file ourselves
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        # a normal file cannot carry a copy record
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
509 509
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with a a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    # restore the copy record dropped by the removal
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in a merge-ish state: nothing to do
                    return
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
533 533
534 534 def otherparent(self, f):
535 535 '''Mark as coming from the other parent, always dirty.'''
536 536 if not self.in_merge:
537 537 msg = _(b"setting %r to other parent only allowed in merges") % f
538 538 raise error.Abort(msg)
539 539 entry = self._map.get(f)
540 540 if entry is not None and entry.tracked:
541 541 # merge-like
542 542 self._addpath(f, merged=True)
543 543 else:
544 544 # add-like
545 545 self._addpath(f, from_p2=True)
546 546 self._map.copymap.pop(f, None)
547 547
    def add(self, f):
        '''Mark a file added.'''
        # public entry point; the work is done by the internal helper
        self._add(f)

    def _add(self, filename):
        """internal function to mark a file as added"""
        self._addpath(filename, added=True)
        # an added file can no longer carry a copy record
        self._map.copymap.pop(filename, None)
552 556
553 557 def remove(self, f):
554 558 '''Mark a file removed.'''
555 559 self._dirty = True
556 560 self._updatedfiles.add(f)
557 561 self._map.removefile(f, in_merge=self.in_merge)
558 562
559 563 def merge(self, f):
560 564 '''Mark a file merged.'''
561 565 if not self.in_merge:
562 566 return self.normallookup(f)
563 567 return self.otherparent(f)
564 568
565 569 def drop(self, f):
566 570 '''Drop a file from the dirstate'''
567 571 if self._map.dropfile(f):
568 572 self._dirty = True
569 573 self._updatedfiles.add(f)
570 574 self._map.copymap.pop(f, None)
571 575
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Find the preferred case of *path* and cache it in *storemap*.

        normed is the case-normalized form of path; the discovered folded
        form is stored into storemap keyed by normed, then returned.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded

        return folded
597 601
598 602 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
599 603 normed = util.normcase(path)
600 604 folded = self._map.filefoldmap.get(normed, None)
601 605 if folded is None:
602 606 if isknown:
603 607 folded = path
604 608 else:
605 609 folded = self._discoverpath(
606 610 path, normed, ignoremissing, exists, self._map.filefoldmap
607 611 )
608 612 return folded
609 613
610 614 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
611 615 normed = util.normcase(path)
612 616 folded = self._map.filefoldmap.get(normed, None)
613 617 if folded is None:
614 618 folded = self._map.dirfoldmap.get(normed, None)
615 619 if folded is None:
616 620 if isknown:
617 621 folded = path
618 622 else:
619 623 # store discovered result in dirfoldmap so that future
620 624 # normalizefile calls don't start matching directories
621 625 folded = self._discoverpath(
622 626 path, normed, ignoremissing, exists, self._map.dirfoldmap
623 627 )
624 628 return folded
625 629
626 630 def normalize(self, path, isknown=False, ignoremissing=False):
627 631 """
628 632 normalize the case of a pathname when on a casefolding filesystem
629 633
630 634 isknown specifies whether the filename came from walking the
631 635 disk, to avoid extra filesystem access.
632 636
633 637 If ignoremissing is True, missing path are returned
634 638 unchanged. Otherwise, we try harder to normalize possibly
635 639 existing path components.
636 640
637 641 The normalized case is determined based on the following precedence:
638 642
639 643 - version of name already stored in the dirstate
640 644 - version of name stored on disk
641 645 - version provided via command arguments
642 646 """
643 647
644 648 if self._checkcase:
645 649 return self._normalize(path, isknown, ignoremissing)
646 650 return path
647 651
648 652 def clear(self):
649 653 self._map.clear()
650 654 self._lastnormaltime = 0
651 655 self._updatedfiles.clear()
652 656 self._dirty = True
653 657
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to *parent*, re-looking-up files as needed.

        With no changedfiles, everything is rebuilt from allfiles; with a
        small changedfiles, allfiles is scanned linearly to avoid building
        a large set; otherwise set arithmetic is used.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime, but this rebuild does not
            # invalidate the previously-observed clean timeslot
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self.drop(f)

        self._dirty = True
687 691
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        # delegated to the map; presumably recorded when the dirstate file
        # is read — see dirstatemap for details
        return self._map.identity
695 699
    def write(self, tr):
        """Write in-memory changes out, possibly delayed via transaction *tr*."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        # no transaction: write synchronously and atomically
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
727 731
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
738 742
    def _writedirstate(self, st):
        """Serialize the dirstate map into open file object *st*."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
772 776
773 777 def _dirignore(self, f):
774 778 if self._ignore(f):
775 779 return True
776 780 for p in pathutil.finddirs(f):
777 781 if self._ignore(p):
778 782 return True
779 783 return False
780 784
781 785 def _ignorefiles(self):
782 786 files = []
783 787 if os.path.exists(self._join(b'.hgignore')):
784 788 files.append(self._join(b'.hgignore'))
785 789 for name, path in self._ui.configitems(b"ui"):
786 790 if name == b'ignore' or name.startswith(b'ignore.'):
787 791 # we need to use os.path.join here rather than self._join
788 792 # because path is arbitrary and user-specified
789 793 files.append(os.path.join(self._rootdir, util.expandpath(path)))
790 794 return files
791 795
    def _ignorefileandline(self, f):
        """Return (ignorefile, lineno, line) for the rule that ignores *f*.

        Ignore files are walked breadth-first, queueing subinclude
        targets as they are seen; returns (None, -1, b"") when no
        pattern matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced file instead of matching on it
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
813 817
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for a non-file, non-symlink entry
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently-used names locally for the loops below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop files inside subrepos from the walk; both lists are sorted so
        # a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst: # nf not found on disk - it is dirstate only
                if nf in dmap: # does it exactly match a missing file?
                    results[nf] = None
                else: # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
948 952
949 953 def walk(self, match, subrepos, unknown, ignored, full=True):
950 954 """
951 955 Walk recursively through the directory tree, finding all files
952 956 matched by match.
953 957
954 958 If full is False, maybe skip some known-clean files.
955 959
956 960 Return a dict mapping filename to stat-like object (either
957 961 mercurial.osutil.stat instance or return value of os.stat()).
958 962
959 963 """
960 964 # full is a flag that extensions that hook into walk can use -- this
961 965 # implementation doesn't use it at all. This satisfies the contract
962 966 # because we only guarantee a "maybe".
963 967
964 968 if ignored:
965 969 ignore = util.never
966 970 dirignore = util.never
967 971 elif unknown:
968 972 ignore = self._ignore
969 973 dirignore = self._dirignore
970 974 else:
971 975 # if not unknown and not ignored, drop dir recursion and step 2
972 976 ignore = util.always
973 977 dirignore = util.always
974 978
975 979 matchfn = match.matchfn
976 980 matchalways = match.always()
977 981 matchtdir = match.traversedir
978 982 dmap = self._map
979 983 listdir = util.listdir
980 984 lstat = os.lstat
981 985 dirkind = stat.S_IFDIR
982 986 regkind = stat.S_IFREG
983 987 lnkkind = stat.S_IFLNK
984 988 join = self._join
985 989
986 990 exact = skipstep3 = False
987 991 if match.isexact(): # match.exact
988 992 exact = True
989 993 dirignore = util.always # skip step 2
990 994 elif match.prefix(): # match.match, no patterns
991 995 skipstep3 = True
992 996
993 997 if not exact and self._checkcase:
994 998 normalize = self._normalize
995 999 normalizefile = self._normalizefile
996 1000 skipstep3 = False
997 1001 else:
998 1002 normalize = self._normalize
999 1003 normalizefile = None
1000 1004
1001 1005 # step 1: find all explicit files
1002 1006 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1003 1007 if matchtdir:
1004 1008 for d in work:
1005 1009 matchtdir(d[0])
1006 1010 for d in dirsnotfound:
1007 1011 matchtdir(d)
1008 1012
1009 1013 skipstep3 = skipstep3 and not (work or dirsnotfound)
1010 1014 work = [d for d in work if not dirignore(d[0])]
1011 1015
1012 1016 # step 2: visit subdirectories
1013 1017 def traverse(work, alreadynormed):
1014 1018 wadd = work.append
1015 1019 while work:
1016 1020 tracing.counter('dirstate.walk work', len(work))
1017 1021 nd = work.pop()
1018 1022 visitentries = match.visitchildrenset(nd)
1019 1023 if not visitentries:
1020 1024 continue
1021 1025 if visitentries == b'this' or visitentries == b'all':
1022 1026 visitentries = None
1023 1027 skip = None
1024 1028 if nd != b'':
1025 1029 skip = b'.hg'
1026 1030 try:
1027 1031 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1028 1032 entries = listdir(join(nd), stat=True, skip=skip)
1029 1033 except OSError as inst:
1030 1034 if inst.errno in (errno.EACCES, errno.ENOENT):
1031 1035 match.bad(
1032 1036 self.pathto(nd), encoding.strtolocal(inst.strerror)
1033 1037 )
1034 1038 continue
1035 1039 raise
1036 1040 for f, kind, st in entries:
1037 1041 # Some matchers may return files in the visitentries set,
1038 1042 # instead of 'this', if the matcher explicitly mentions them
1039 1043 # and is not an exactmatcher. This is acceptable; we do not
1040 1044 # make any hard assumptions about file-or-directory below
1041 1045 # based on the presence of `f` in visitentries. If
1042 1046 # visitchildrenset returned a set, we can always skip the
1043 1047 # entries *not* in the set it provided regardless of whether
1044 1048 # they're actually a file or a directory.
1045 1049 if visitentries and f not in visitentries:
1046 1050 continue
1047 1051 if normalizefile:
1048 1052 # even though f might be a directory, we're only
1049 1053 # interested in comparing it to files currently in the
1050 1054 # dmap -- therefore normalizefile is enough
1051 1055 nf = normalizefile(
1052 1056 nd and (nd + b"/" + f) or f, True, True
1053 1057 )
1054 1058 else:
1055 1059 nf = nd and (nd + b"/" + f) or f
1056 1060 if nf not in results:
1057 1061 if kind == dirkind:
1058 1062 if not ignore(nf):
1059 1063 if matchtdir:
1060 1064 matchtdir(nf)
1061 1065 wadd(nf)
1062 1066 if nf in dmap and (matchalways or matchfn(nf)):
1063 1067 results[nf] = None
1064 1068 elif kind == regkind or kind == lnkkind:
1065 1069 if nf in dmap:
1066 1070 if matchalways or matchfn(nf):
1067 1071 results[nf] = st
1068 1072 elif (matchalways or matchfn(nf)) and not ignore(
1069 1073 nf
1070 1074 ):
1071 1075 # unknown file -- normalize if necessary
1072 1076 if not alreadynormed:
1073 1077 nf = normalize(nf, False, True)
1074 1078 results[nf] = st
1075 1079 elif nf in dmap and (matchalways or matchfn(nf)):
1076 1080 results[nf] = None
1077 1081
1078 1082 for nd, d in work:
1079 1083 # alreadynormed means that processwork doesn't have to do any
1080 1084 # expensive directory normalization
1081 1085 alreadynormed = not normalize or nd == d
1082 1086 traverse([d], alreadynormed)
1083 1087
1084 1088 for s in subrepos:
1085 1089 del results[s]
1086 1090 del results[b'.hg']
1087 1091
1088 1092 # step 3: visit remaining files from dmap
1089 1093 if not skipstep3 and not exact:
1090 1094 # If a dmap file is not in results yet, it was either
1091 1095 # a) not matching matchfn b) ignored, c) missing, or d) under a
1092 1096 # symlink directory.
1093 1097 if not results and matchalways:
1094 1098 visit = [f for f in dmap]
1095 1099 else:
1096 1100 visit = [f for f in dmap if f not in results and matchfn(f)]
1097 1101 visit.sort()
1098 1102
1099 1103 if unknown:
1100 1104 # unknown == True means we walked all dirs under the roots
1101 1105 # that wasn't ignored, and everything that matched was stat'ed
1102 1106 # and is already in results.
1103 1107 # The rest must thus be ignored or under a symlink.
1104 1108 audit_path = pathutil.pathauditor(self._root, cached=True)
1105 1109
1106 1110 for nf in iter(visit):
1107 1111 # If a stat for the same file was already added with a
1108 1112 # different case, don't add one for this, since that would
1109 1113 # make it appear as if the file exists under both names
1110 1114 # on disk.
1111 1115 if (
1112 1116 normalizefile
1113 1117 and normalizefile(nf, True, True) in results
1114 1118 ):
1115 1119 results[nf] = None
1116 1120 # Report ignored items in the dmap as long as they are not
1117 1121 # under a symlink directory.
1118 1122 elif audit_path.check(nf):
1119 1123 try:
1120 1124 results[nf] = lstat(join(nf))
1121 1125 # file was just ignored, no links, and exists
1122 1126 except OSError:
1123 1127 # file doesn't exist
1124 1128 results[nf] = None
1125 1129 else:
1126 1130 # It's either missing or under a symlink directory
1127 1131 # which we in this case report as missing
1128 1132 results[nf] = None
1129 1133 else:
1130 1134 # We may not have walked the full directory tree above,
1131 1135 # so stat and check everything we missed.
1132 1136 iv = iter(visit)
1133 1137 for st in util.statfiles([join(i) for i in visit]):
1134 1138 results[next(iv)] = st
1135 1139 return results
1136 1140
1137 1141 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1138 1142 # Force Rayon (Rust parallelism library) to respect the number of
1139 1143 # workers. This is a temporary workaround until Rust code knows
1140 1144 # how to read the config file.
1141 1145 numcpus = self._ui.configint(b"worker", b"numcpus")
1142 1146 if numcpus is not None:
1143 1147 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1144 1148
1145 1149 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1146 1150 if not workers_enabled:
1147 1151 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1148 1152
1149 1153 (
1150 1154 lookup,
1151 1155 modified,
1152 1156 added,
1153 1157 removed,
1154 1158 deleted,
1155 1159 clean,
1156 1160 ignored,
1157 1161 unknown,
1158 1162 warnings,
1159 1163 bad,
1160 1164 traversed,
1161 1165 dirty,
1162 1166 ) = rustmod.status(
1163 1167 self._map._rustmap,
1164 1168 matcher,
1165 1169 self._rootdir,
1166 1170 self._ignorefiles(),
1167 1171 self._checkexec,
1168 1172 self._lastnormaltime,
1169 1173 bool(list_clean),
1170 1174 bool(list_ignored),
1171 1175 bool(list_unknown),
1172 1176 bool(matcher.traversedir),
1173 1177 )
1174 1178
1175 1179 self._dirty |= dirty
1176 1180
1177 1181 if matcher.traversedir:
1178 1182 for dir in traversed:
1179 1183 matcher.traversedir(dir)
1180 1184
1181 1185 if self._ui.warn:
1182 1186 for item in warnings:
1183 1187 if isinstance(item, tuple):
1184 1188 file_path, syntax = item
1185 1189 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1186 1190 file_path,
1187 1191 syntax,
1188 1192 )
1189 1193 self._ui.warn(msg)
1190 1194 else:
1191 1195 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1192 1196 self._ui.warn(
1193 1197 msg
1194 1198 % (
1195 1199 pathutil.canonpath(
1196 1200 self._rootdir, self._rootdir, item
1197 1201 ),
1198 1202 b"No such file or directory",
1199 1203 )
1200 1204 )
1201 1205
1202 1206 for (fn, message) in bad:
1203 1207 matcher.bad(fn, encoding.strtolocal(message))
1204 1208
1205 1209 status = scmutil.status(
1206 1210 modified=modified,
1207 1211 added=added,
1208 1212 removed=removed,
1209 1213 deleted=deleted,
1210 1214 unknown=unknown,
1211 1215 ignored=ignored,
1212 1216 clean=clean,
1213 1217 )
1214 1218 return (lookup, status)
1215 1219
1216 1220 def status(self, match, subrepos, ignored, clean, unknown):
1217 1221 """Determine the status of the working copy relative to the
1218 1222 dirstate and return a pair of (unsure, status), where status is of type
1219 1223 scmutil.status and:
1220 1224
1221 1225 unsure:
1222 1226 files that might have been modified since the dirstate was
1223 1227 written, but need to be read to be sure (size is the same
1224 1228 but mtime differs)
1225 1229 status.modified:
1226 1230 files that have definitely been modified since the dirstate
1227 1231 was written (different size or mode)
1228 1232 status.clean:
1229 1233 files that have definitely not been modified since the
1230 1234 dirstate was written
1231 1235 """
1232 1236 listignored, listclean, listunknown = ignored, clean, unknown
1233 1237 lookup, modified, added, unknown, ignored = [], [], [], [], []
1234 1238 removed, deleted, clean = [], [], []
1235 1239
1236 1240 dmap = self._map
1237 1241 dmap.preload()
1238 1242
1239 1243 use_rust = True
1240 1244
1241 1245 allowed_matchers = (
1242 1246 matchmod.alwaysmatcher,
1243 1247 matchmod.exactmatcher,
1244 1248 matchmod.includematcher,
1245 1249 )
1246 1250
1247 1251 if rustmod is None:
1248 1252 use_rust = False
1249 1253 elif self._checkcase:
1250 1254 # Case-insensitive filesystems are not handled yet
1251 1255 use_rust = False
1252 1256 elif subrepos:
1253 1257 use_rust = False
1254 1258 elif sparse.enabled:
1255 1259 use_rust = False
1256 1260 elif not isinstance(match, allowed_matchers):
1257 1261 # Some matchers have yet to be implemented
1258 1262 use_rust = False
1259 1263
1260 1264 if use_rust:
1261 1265 try:
1262 1266 return self._rust_status(
1263 1267 match, listclean, listignored, listunknown
1264 1268 )
1265 1269 except rustmod.FallbackError:
1266 1270 pass
1267 1271
1268 1272 def noop(f):
1269 1273 pass
1270 1274
1271 1275 dcontains = dmap.__contains__
1272 1276 dget = dmap.__getitem__
1273 1277 ladd = lookup.append # aka "unsure"
1274 1278 madd = modified.append
1275 1279 aadd = added.append
1276 1280 uadd = unknown.append if listunknown else noop
1277 1281 iadd = ignored.append if listignored else noop
1278 1282 radd = removed.append
1279 1283 dadd = deleted.append
1280 1284 cadd = clean.append if listclean else noop
1281 1285 mexact = match.exact
1282 1286 dirignore = self._dirignore
1283 1287 checkexec = self._checkexec
1284 1288 copymap = self._map.copymap
1285 1289 lastnormaltime = self._lastnormaltime
1286 1290
1287 1291 # We need to do full walks when either
1288 1292 # - we're listing all clean files, or
1289 1293 # - match.traversedir does something, because match.traversedir should
1290 1294 # be called for every dir in the working dir
1291 1295 full = listclean or match.traversedir is not None
1292 1296 for fn, st in pycompat.iteritems(
1293 1297 self.walk(match, subrepos, listunknown, listignored, full=full)
1294 1298 ):
1295 1299 if not dcontains(fn):
1296 1300 if (listignored or mexact(fn)) and dirignore(fn):
1297 1301 if listignored:
1298 1302 iadd(fn)
1299 1303 else:
1300 1304 uadd(fn)
1301 1305 continue
1302 1306
1303 1307 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1304 1308 # written like that for performance reasons. dmap[fn] is not a
1305 1309 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1306 1310 # opcode has fast paths when the value to be unpacked is a tuple or
1307 1311 # a list, but falls back to creating a full-fledged iterator in
1308 1312 # general. That is much slower than simply accessing and storing the
1309 1313 # tuple members one by one.
1310 1314 t = dget(fn)
1311 1315 mode = t.mode
1312 1316 size = t.size
1313 1317 time = t.mtime
1314 1318
1315 1319 if not st and t.tracked:
1316 1320 dadd(fn)
1317 1321 elif t.merged:
1318 1322 madd(fn)
1319 1323 elif t.added:
1320 1324 aadd(fn)
1321 1325 elif t.removed:
1322 1326 radd(fn)
1323 1327 elif t.tracked:
1324 1328 if (
1325 1329 size >= 0
1326 1330 and (
1327 1331 (size != st.st_size and size != st.st_size & _rangemask)
1328 1332 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1329 1333 )
1330 1334 or t.from_p2
1331 1335 or fn in copymap
1332 1336 ):
1333 1337 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1334 1338 # issue6456: Size returned may be longer due to
1335 1339 # encryption on EXT-4 fscrypt, undecided.
1336 1340 ladd(fn)
1337 1341 else:
1338 1342 madd(fn)
1339 1343 elif (
1340 1344 time != st[stat.ST_MTIME]
1341 1345 and time != st[stat.ST_MTIME] & _rangemask
1342 1346 ):
1343 1347 ladd(fn)
1344 1348 elif st[stat.ST_MTIME] == lastnormaltime:
1345 1349 # fn may have just been marked as normal and it may have
1346 1350 # changed in the same second without changing its size.
1347 1351 # This can happen if we quickly do multiple commits.
1348 1352 # Force lookup, so we don't miss such a racy file change.
1349 1353 ladd(fn)
1350 1354 elif listclean:
1351 1355 cadd(fn)
1352 1356 status = scmutil.status(
1353 1357 modified, added, removed, deleted, unknown, ignored, clean
1354 1358 )
1355 1359 return (lookup, status)
1356 1360
1357 1361 def matches(self, match):
1358 1362 """
1359 1363 return files in the dirstate (in whatever state) filtered by match
1360 1364 """
1361 1365 dmap = self._map
1362 1366 if rustmod is not None:
1363 1367 dmap = self._map._rustmap
1364 1368
1365 1369 if match.always():
1366 1370 return dmap.keys()
1367 1371 files = match.files()
1368 1372 if match.isexact():
1369 1373 # fast path -- filter the other way around, since typically files is
1370 1374 # much smaller than dmap
1371 1375 return [f for f in files if f in dmap]
1372 1376 if match.prefix() and all(fn in dmap for fn in files):
1373 1377 # fast path -- all the values are known to be files, so just return
1374 1378 # that
1375 1379 return list(files)
1376 1380 return [f for f in dmap if match(f)]
1377 1381
1378 1382 def _actualfilename(self, tr):
1379 1383 if tr:
1380 1384 return self._pendingfilename
1381 1385 else:
1382 1386 return self._filename
1383 1387
1384 1388 def savebackup(self, tr, backupname):
1385 1389 '''Save current dirstate into backup file'''
1386 1390 filename = self._actualfilename(tr)
1387 1391 assert backupname != filename
1388 1392
1389 1393 # use '_writedirstate' instead of 'write' to write changes certainly,
1390 1394 # because the latter omits writing out if transaction is running.
1391 1395 # output file will be used to create backup of dirstate at this point.
1392 1396 if self._dirty or not self._opener.exists(filename):
1393 1397 self._writedirstate(
1394 1398 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1395 1399 )
1396 1400
1397 1401 if tr:
1398 1402 # ensure that subsequent tr.writepending returns True for
1399 1403 # changes written out above, even if dirstate is never
1400 1404 # changed after this
1401 1405 tr.addfilegenerator(
1402 1406 b'dirstate',
1403 1407 (self._filename,),
1404 1408 self._writedirstate,
1405 1409 location=b'plain',
1406 1410 )
1407 1411
1408 1412 # ensure that pending file written above is unlinked at
1409 1413 # failure, even if tr.writepending isn't invoked until the
1410 1414 # end of this transaction
1411 1415 tr.registertmp(filename, location=b'plain')
1412 1416
1413 1417 self._opener.tryunlink(backupname)
1414 1418 # hardlink backup is okay because _writedirstate is always called
1415 1419 # with an "atomictemp=True" file.
1416 1420 util.copyfile(
1417 1421 self._opener.join(filename),
1418 1422 self._opener.join(backupname),
1419 1423 hardlink=True,
1420 1424 )
1421 1425
1422 1426 def restorebackup(self, tr, backupname):
1423 1427 '''Restore dirstate by backup file'''
1424 1428 # this "invalidate()" prevents "wlock.release()" from writing
1425 1429 # changes of dirstate out after restoring from backup file
1426 1430 self.invalidate()
1427 1431 filename = self._actualfilename(tr)
1428 1432 o = self._opener
1429 1433 if util.samefile(o.join(backupname), o.join(filename)):
1430 1434 o.unlink(backupname)
1431 1435 else:
1432 1436 o.rename(backupname, filename, checkambig=True)
1433 1437
1434 1438 def clearbackup(self, tr, backupname):
1435 1439 '''Clear backup file'''
1436 1440 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now