##// END OF EJS Templates
dirstate: introduce an internal `_drop` method...
marmoute -
r48391:3d8b639b default
parent child Browse files
Show More
@@ -1,1444 +1,1448 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43 43
44 44 propertycache = util.propertycache
45 45 filecache = scmutil.filecache
46 46 _rangemask = dirstatemap.rangemask
47 47
48 48 DirstateItem = parsers.DirstateItem
49 49
50 50
class repocache(filecache):
    """filecache variant whose names are resolved inside ``.hg/``."""

    def join(self, obj, fname):
        # Resolve through the repo opener so the path lands under .hg/.
        opener = obj._opener
        return opener.join(fname)
56 56
57 57
class rootcache(filecache):
    """filecache variant whose names are resolved from the repository root."""

    def join(self, obj, fname):
        # Resolve relative to the working-directory root.
        return obj._join(fname)
63 63
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
75 75 @interfaceutil.implementer(intdirstate.idirstate)
76 76 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # number of nested parentchange() contexts currently open
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        # category -> callback invoked on parent changes (see
        # addparentchangecallback / _writedirstate)
        self._plchangecallbacks = {}
        # parents as of the last write (None when unchanged since then)
        self._origpl = None
        # files touched since the last write; used by write() to clear
        # ambiguous timestamps
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
117 117
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # Touching self._pl forces the dirstate map (and parents) to load now.
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0
147 147
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # Assigning to self._map replaces the propertycache descriptor on the
        # instance, so subsequent accesses are plain attribute lookups.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
172 172
    @repocache(b'branch')
    def _branch(self):
        """Current branch name (bytes) read from .hg/branch.

        A missing file means the default branch; any other IOError
        propagates to the caller.
        """
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b"default"

    @property
    def _pl(self):
        # parents pair as stored in the dirstate map
        return self._map.parents()
185 185
186 186 def hasdir(self, d):
187 187 return self._map.hastrackeddir(d)
188 188
189 189 @rootcache(b'.hgignore')
190 190 def _ignore(self):
191 191 files = self._ignorefiles()
192 192 if not files:
193 193 return matchmod.never()
194 194
195 195 pats = [b'include:%s' % f for f in files]
196 196 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
197 197
198 198 @propertycache
199 199 def _slash(self):
200 200 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
201 201
202 202 @propertycache
203 203 def _checklink(self):
204 204 return util.checklink(self._root)
205 205
206 206 @propertycache
207 207 def _checkexec(self):
208 208 return bool(util.checkexec(self._root))
209 209
210 210 @propertycache
211 211 def _checkcase(self):
212 212 return not util.fscasesensitive(self._join(b'.hg'))
213 213
214 214 def _join(self, f):
215 215 # much faster than os.path.join()
216 216 # it's safe because f is always a relative path
217 217 return self._rootdir + f
218 218
    def flagfunc(self, buildfallback):
        """Return a callable mapping a path to its flags.

        The returned function yields b'l' for symlinks, b'x' for
        executable files and b'' otherwise.  When the filesystem cannot
        express symlinks and/or the exec bit, *buildfallback* is called to
        obtain a function supplying the missing flag (source of the
        fallback data is up to the caller -- not visible here).
        """
        if self._checklink and self._checkexec:
            # filesystem supports both: read flags straight from lstat

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    # missing/unreadable file: report no flags
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks are real; exec bit comes from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit is real; symlink flag comes from the fallback

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither is supported: everything comes from the fallback
            return fallback
258 258
259 259 @propertycache
260 260 def _cwd(self):
261 261 # internal config: ui.forcecwd
262 262 forcecwd = self._ui.config(b'ui', b'forcecwd')
263 263 if forcecwd:
264 264 return forcecwd
265 265 return encoding.getcwd()
266 266
267 267 def getcwd(self):
268 268 """Return the path from which a canonical path is calculated.
269 269
270 270 This path should be used to resolve file patterns or to convert
271 271 canonical paths back to file paths for display. It shouldn't be
272 272 used to get real file paths. Use vfs functions instead.
273 273 """
274 274 cwd = self._cwd
275 275 if cwd == self._root:
276 276 return b''
277 277 # self._root ends with a path separator if self._root is '/' or 'C:\'
278 278 rootsep = self._root
279 279 if not util.endswithsep(rootsep):
280 280 rootsep += pycompat.ossep
281 281 if cwd.startswith(rootsep):
282 282 return cwd[len(rootsep) :]
283 283 else:
284 284 # we're outside the repo. return an absolute path.
285 285 return cwd
286 286
287 287 def pathto(self, f, cwd=None):
288 288 if cwd is None:
289 289 cwd = self.getcwd()
290 290 path = util.pathto(self._root, cwd, f)
291 291 if self._slash:
292 292 return util.pconvert(path)
293 293 return path
294 294
295 295 def __getitem__(self, key):
296 296 """Return the current state of key (a filename) in the dirstate.
297 297
298 298 States are:
299 299 n normal
300 300 m needs merging
301 301 r marked for removal
302 302 a marked for addition
303 303 ? not tracked
304 304
305 305 XXX The "state" is a bit obscure to be in the "public" API. we should
306 306 consider migrating all user of this to going through the dirstate entry
307 307 instead.
308 308 """
309 309 entry = self._map.get(key)
310 310 if entry is not None:
311 311 return entry.state
312 312 return b'?'
313 313
314 314 def __contains__(self, key):
315 315 return key in self._map
316 316
317 317 def __iter__(self):
318 318 return iter(sorted(self._map))
319 319
320 320 def items(self):
321 321 return pycompat.iteritems(self._map)
322 322
323 323 iteritems = items
324 324
325 325 def directories(self):
326 326 return self._map.directories()
327 327
328 328 def parents(self):
329 329 return [self._validate(p) for p in self._pl]
330 330
331 331 def p1(self):
332 332 return self._validate(self._pl[0])
333 333
334 334 def p2(self):
335 335 return self._validate(self._pl[1])
336 336
337 337 @property
338 338 def in_merge(self):
339 339 """True if a merge is in progress"""
340 340 return self._pl[1] != self._nodeconstants.nullid
341 341
342 342 def branch(self):
343 343 return encoding.tolocal(self._branch)
344 344
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            # enforced so an aborted operation never leaves half-set parents
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for _writedirstate callbacks
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # leaving a merge: demote merge/other-parent entries
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
392 392
    def setbranch(self, branch):
        """Persist *branch* (given in local encoding) to .hg/branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # atomictemp file: discard the partial write before re-raising
            f.discard()
            raise
408 408
409 409 def invalidate(self):
410 410 """Causes the next access to reread the dirstate.
411 411
412 412 This is different from localrepo.invalidatedirstate() because it always
413 413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
414 414 check whether the dirstate has changed before rereading it."""
415 415
416 416 for a in ("_map", "_branch", "_ignore"):
417 417 if a in self.__dict__:
418 418 delattr(self, a)
419 419 self._lastnormaltime = 0
420 420 self._dirty = False
421 421 self._updatedfiles.clear()
422 422 self._parentwriters = 0
423 423 self._origpl = None
424 424
425 425 def copy(self, source, dest):
426 426 """Mark dest as a copy of source. Unmark dest if source is None."""
427 427 if source == dest:
428 428 return
429 429 self._dirty = True
430 430 if source is not None:
431 431 self._map.copymap[dest] = source
432 432 self._updatedfiles.add(source)
433 433 self._updatedfiles.add(dest)
434 434 elif self._map.copymap.pop(dest, None):
435 435 self._updatedfiles.add(dest)
436 436
437 437 def copied(self, file):
438 438 return self._map.copymap.get(file, None)
439 439
440 440 def copies(self):
441 441 return self._map.copymap
442 442
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """Common helper recording *f* in the dirstate map.

        When the entry is newly added (or resurrects a removed entry), the
        filename is validated and checked for clashes against tracked
        directories/files; error.Abort is raised on a clash.  All flags are
        then forwarded to ``self._map.addfile``.
        """
        entry = self._map.get(f)
        # validation only matters when the file (re)enters tracked state
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                # a tracked directory shadows this file name
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    # an ancestor path of f is tracked as a *file*
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
482 482
    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # no caller-provided data: stat the file now (racier, see above)
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
509 509
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    # merge()/otherparent() clear the copy record; restore it
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    # already in the desired merge state: nothing to do
                    return
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)

    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if not self.in_merge:
            msg = _(b"setting %r to other parent only allowed in merges") % f
            raise error.Abort(msg)
        entry = self._map.get(f)
        if entry is not None and entry.tracked:
            # merge-like
            self._addpath(f, merged=True)
        else:
            # add-like
            self._addpath(f, from_p2=True)
        self._map.copymap.pop(f, None)
547 547
548 548 def add(self, f):
549 549 '''Mark a file added.'''
550 550 self._add(f)
551 551
552 552 def _add(self, filename):
553 553 """internal function to mark a file as added"""
554 554 self._addpath(filename, added=True)
555 555 self._map.copymap.pop(filename, None)
556 556
557 557 def remove(self, f):
558 558 '''Mark a file removed'''
559 559 self._remove(f)
560 560
561 561 def _remove(self, filename):
562 562 """internal function to mark a file removed"""
563 563 self._dirty = True
564 564 self._updatedfiles.add(filename)
565 565 self._map.removefile(filename, in_merge=self.in_merge)
566 566
567 567 def merge(self, f):
568 568 '''Mark a file merged.'''
569 569 if not self.in_merge:
570 570 return self.normallookup(f)
571 571 return self.otherparent(f)
572 572
573 573 def drop(self, f):
574 574 '''Drop a file from the dirstate'''
575 if self._map.dropfile(f):
575 self._drop(f)
576
577 def _drop(self, filename):
578 """internal function to drop a file from the dirstate"""
579 if self._map.dropfile(filename):
576 580 self._dirty = True
577 self._updatedfiles.add(f)
578 self._map.copymap.pop(f, None)
581 self._updatedfiles.add(filename)
582 self._map.copymap.pop(filename, None)
579 583
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Resolve the on-disk case of *path* and cache it in *storemap*.

        *normed* is the case-normalized form of *path*; *exists* may be
        supplied by the caller to skip the lexists() check.  Leading
        directory components are normalized recursively via _normalize.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only existing paths are cached for future lookups
            storemap[normed] = folded

        return folded
605 609
    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        """Case-normalize *path* using only the file fold map.

        *isknown* means the name came from walking the disk, so its case is
        already authoritative and no filesystem access is needed.
        """
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Case-normalize *path*, consulting file then directory fold maps."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
633 637
634 638 def normalize(self, path, isknown=False, ignoremissing=False):
635 639 """
636 640 normalize the case of a pathname when on a casefolding filesystem
637 641
638 642 isknown specifies whether the filename came from walking the
639 643 disk, to avoid extra filesystem access.
640 644
641 645 If ignoremissing is True, missing path are returned
642 646 unchanged. Otherwise, we try harder to normalize possibly
643 647 existing path components.
644 648
645 649 The normalized case is determined based on the following precedence:
646 650
647 651 - version of name already stored in the dirstate
648 652 - version of name stored on disk
649 653 - version provided via command arguments
650 654 """
651 655
652 656 if self._checkcase:
653 657 return self._normalize(path, isknown, ignoremissing)
654 658 return path
655 659
656 660 def clear(self):
657 661 self._map.clear()
658 662 self._lastnormaltime = 0
659 663 self._updatedfiles.clear()
660 664 self._dirty = True
661 665
662 666 def rebuild(self, parent, allfiles, changedfiles=None):
663 667 if changedfiles is None:
664 668 # Rebuild entire dirstate
665 669 to_lookup = allfiles
666 670 to_drop = []
667 671 lastnormaltime = self._lastnormaltime
668 672 self.clear()
669 673 self._lastnormaltime = lastnormaltime
670 674 elif len(changedfiles) < 10:
671 675 # Avoid turning allfiles into a set, which can be expensive if it's
672 676 # large.
673 677 to_lookup = []
674 678 to_drop = []
675 679 for f in changedfiles:
676 680 if f in allfiles:
677 681 to_lookup.append(f)
678 682 else:
679 683 to_drop.append(f)
680 684 else:
681 685 changedfilesset = set(changedfiles)
682 686 to_lookup = changedfilesset & set(allfiles)
683 687 to_drop = changedfilesset - to_lookup
684 688
685 689 if self._origpl is None:
686 690 self._origpl = self._pl
687 691 self._map.setparents(parent, self._nodeconstants.nullid)
688 692
689 693 for f in to_lookup:
690 694 self.normallookup(f)
691 695 for f in to_drop:
692 self.drop(f)
696 self._drop(f)
693 697
694 698 self._dirty = True
695 699
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity

    def write(self, tr):
        """Write pending changes to disk, either now or via transaction *tr*."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        # no transaction: write synchronously via an atomic temp file
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
735 739
736 740 def addparentchangecallback(self, category, callback):
737 741 """add a callback to be called when the wd parents are changed
738 742
739 743 Callback will be called with the following arguments:
740 744 dirstate, (oldp1, oldp2), (newp1, newp2)
741 745
742 746 Category is a unique identifier to allow overwriting an old callback
743 747 with a newer callback.
744 748 """
745 749 self._plchangecallbacks[category] = callback
746 750
    def _writedirstate(self, st):
        """Serialize the dirstate map to the open file-like object *st*."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            # sorted for deterministic callback ordering
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
780 784
    def _dirignore(self, f):
        """Return True if *f* or any of its parent directories is ignored."""
        if self._ignore(f):
            return True
        for p in pathutil.finddirs(f):
            if self._ignore(p):
                return True
        return False

    def _ignorefiles(self):
        """Return the list of absolute paths of all active ignore files."""
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files

    def _ignorefileandline(self, f):
        """Return (ignorefile, lineno, line) of the rule ignoring *f*.

        Subincluded pattern files are followed breadth-first; returns
        (None, -1, b"") when no rule matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced file; visited guards against cycles
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
821 825
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable reason for rejecting a non-file, non-dir entry
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind hot lookups to locals for the loop below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop requested files that live inside a subrepo; both lists are
        # sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # sentinels: subrepo paths and .hg map to None in the results
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
956 960
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Pick ignore predicates according to what the caller wants listed:
        # listing ignored files means nothing may be treated as ignored.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Hoist attribute lookups out of the traversal loop for speed.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            # on a case-insensitive fs we cannot prove step 3 is redundant
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # iterative depth-first walk; ``work`` is the pending-dir stack
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # unreadable/vanished dirs are reported, not fatal
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            # a dmap entry with a directory's name means the
                            # tracked file is gone (replaced by a directory)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # special file type (fifo, socket, ...): report
                            # the tracked entry as missing
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # sentinels inserted by _walkexplicit must not leak to the caller
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1144 1148
1145 1149 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1146 1150 # Force Rayon (Rust parallelism library) to respect the number of
1147 1151 # workers. This is a temporary workaround until Rust code knows
1148 1152 # how to read the config file.
1149 1153 numcpus = self._ui.configint(b"worker", b"numcpus")
1150 1154 if numcpus is not None:
1151 1155 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1152 1156
1153 1157 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1154 1158 if not workers_enabled:
1155 1159 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1156 1160
1157 1161 (
1158 1162 lookup,
1159 1163 modified,
1160 1164 added,
1161 1165 removed,
1162 1166 deleted,
1163 1167 clean,
1164 1168 ignored,
1165 1169 unknown,
1166 1170 warnings,
1167 1171 bad,
1168 1172 traversed,
1169 1173 dirty,
1170 1174 ) = rustmod.status(
1171 1175 self._map._rustmap,
1172 1176 matcher,
1173 1177 self._rootdir,
1174 1178 self._ignorefiles(),
1175 1179 self._checkexec,
1176 1180 self._lastnormaltime,
1177 1181 bool(list_clean),
1178 1182 bool(list_ignored),
1179 1183 bool(list_unknown),
1180 1184 bool(matcher.traversedir),
1181 1185 )
1182 1186
1183 1187 self._dirty |= dirty
1184 1188
1185 1189 if matcher.traversedir:
1186 1190 for dir in traversed:
1187 1191 matcher.traversedir(dir)
1188 1192
1189 1193 if self._ui.warn:
1190 1194 for item in warnings:
1191 1195 if isinstance(item, tuple):
1192 1196 file_path, syntax = item
1193 1197 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1194 1198 file_path,
1195 1199 syntax,
1196 1200 )
1197 1201 self._ui.warn(msg)
1198 1202 else:
1199 1203 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1200 1204 self._ui.warn(
1201 1205 msg
1202 1206 % (
1203 1207 pathutil.canonpath(
1204 1208 self._rootdir, self._rootdir, item
1205 1209 ),
1206 1210 b"No such file or directory",
1207 1211 )
1208 1212 )
1209 1213
1210 1214 for (fn, message) in bad:
1211 1215 matcher.bad(fn, encoding.strtolocal(message))
1212 1216
1213 1217 status = scmutil.status(
1214 1218 modified=modified,
1215 1219 added=added,
1216 1220 removed=removed,
1217 1221 deleted=deleted,
1218 1222 unknown=unknown,
1219 1223 ignored=ignored,
1220 1224 clean=clean,
1221 1225 )
1222 1226 return (lookup, status)
1223 1227
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # keep the caller's flags; the bare names are reused as result lists
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # prefer the Rust fast path unless one of its known limitations
        # applies; it may still raise FallbackError at runtime
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        # bound-method aliases: the loop below is hot, so avoid repeated
        # attribute lookups per file
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file is on disk but not tracked: ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                # tracked but missing from disk
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                # a "normal" entry: compare recorded size/mode/mtime with
                # the on-disk stat to classify it
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1364 1368
1365 1369 def matches(self, match):
1366 1370 """
1367 1371 return files in the dirstate (in whatever state) filtered by match
1368 1372 """
1369 1373 dmap = self._map
1370 1374 if rustmod is not None:
1371 1375 dmap = self._map._rustmap
1372 1376
1373 1377 if match.always():
1374 1378 return dmap.keys()
1375 1379 files = match.files()
1376 1380 if match.isexact():
1377 1381 # fast path -- filter the other way around, since typically files is
1378 1382 # much smaller than dmap
1379 1383 return [f for f in files if f in dmap]
1380 1384 if match.prefix() and all(fn in dmap for fn in files):
1381 1385 # fast path -- all the values are known to be files, so just return
1382 1386 # that
1383 1387 return list(files)
1384 1388 return [f for f in dmap if match(f)]
1385 1389
1386 1390 def _actualfilename(self, tr):
1387 1391 if tr:
1388 1392 return self._pendingfilename
1389 1393 else:
1390 1394 return self._filename
1391 1395
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # remove any stale backup before creating the new one
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1429 1433
1430 1434 def restorebackup(self, tr, backupname):
1431 1435 '''Restore dirstate by backup file'''
1432 1436 # this "invalidate()" prevents "wlock.release()" from writing
1433 1437 # changes of dirstate out after restoring from backup file
1434 1438 self.invalidate()
1435 1439 filename = self._actualfilename(tr)
1436 1440 o = self._opener
1437 1441 if util.samefile(o.join(backupname), o.join(filename)):
1438 1442 o.unlink(backupname)
1439 1443 else:
1440 1444 o.rename(backupname, filename, checkambig=True)
1441 1445
1442 1446 def clearbackup(self, tr, backupname):
1443 1447 '''Clear backup file'''
1444 1448 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now