##// END OF EJS Templates
dirstate: clean up remaining use of "now" during write...
marmoute -
r49222:1a8a70b4 default
parent child Browse files
Show More
@@ -1,1472 +1,1458 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .dirstateutils import (
35 35 timestamp,
36 36 )
37 37
38 38 from .interfaces import (
39 39 dirstate as intdirstate,
40 40 util as interfaceutil,
41 41 )
42 42
43 43 parsers = policy.importmod('parsers')
44 44 rustmod = policy.importrust('dirstate')
45 45
46 46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47 47
48 48 propertycache = util.propertycache
49 49 filecache = scmutil.filecache
50 50 _rangemask = dirstatemap.rangemask
51 51
52 52 DirstateItem = dirstatemap.DirstateItem
53 53
54 54
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname relative to the .hg directory via the repo opener
        return obj._opener.join(fname)
60 60
61 61
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
67 67
68 68
def requires_parents_change(func):
    """Decorator restricting ``func`` to calls made inside a
    ``parentchange`` context; anything else is a programming error."""

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
78 78
79 79
def requires_no_parents_change(func):
    """Decorator forbidding calls to ``func`` from inside a
    ``parentchange`` context; such a call is a programming error."""

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
89 89
90 90
91 91 @interfaceutil.implementer(intdirstate.idirstate)
92 92 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when in-memory state diverges from what is on disk
        self._dirty = False
        self._ui = ui
        self._filecache = {}
        # depth of currently-open parentchange contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        # category -> callback fired when working-directory parents change
        self._plchangecallbacks = {}
        # parents as of the last write; used to detect parent changes
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
131 131
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching self._pl forces the dirstate map to read the parents now
        self._pl
138 138
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
155 155
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        # non-zero whenever at least one parentchange() context is open
        return self._parentwriters > 0
161 161
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # assigning to self._map replaces this propertycache on first access
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
173 173
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
186 186
    @repocache(b'branch')
    def _branch(self):
        """current branch name read from .hg/branch (b"default" if absent)"""
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            # a missing file simply means the default branch; re-raise others
            if inst.errno != errno.ENOENT:
                raise
            return b"default"
195 195
    @property
    def _pl(self):
        # working-directory parent nodes, straight from the dirstate map
        return self._map.parents()
199 199
    def hasdir(self, d):
        # True when ``d`` is a directory containing tracked files
        return self._map.hastrackeddir(d)
202 202
    @rootcache(b'.hgignore')
    def _ignore(self):
        """matcher built from all configured ignore files (never-match if none)"""
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
211 211
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' despite a non-'/' os sep
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
215 215
    @propertycache
    def _checklink(self):
        # True when the filesystem under the repo root supports symlinks
        return util.checklink(self._root)
219 219
    @propertycache
    def _checkexec(self):
        # True when the filesystem under the repo root supports the exec bit
        return bool(util.checkexec(self._root))
223 223
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed via the .hg directory)
        return not util.fscasesensitive(self._join(b'.hg'))
227 227
    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
232 232
    def flagfunc(self, buildfallback):
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.
        """

        # small hack to cache the result of buildfallback()
        fallback_func = []

        def get_flags(x):
            entry = None
            fallback_value = None
            try:
                st = os.lstat(self._join(x))
            except OSError:
                # file is missing from disk: no flags
                return b''

            if self._checklink:
                # layer 1: the filesystem knows about symlinks
                if util.statislink(st):
                    return b'l'
            else:
                # layer 2: fallback symlink bit recorded in the dirstate
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                else:
                    # layer 3: infer from parents (built lazily, cached once)
                    if not fallback_func:
                        fallback_func.append(buildfallback())
                    fallback_value = fallback_func[0](x)
                    if b'l' in fallback_value:
                        return b'l'

            if self._checkexec:
                # layer 1: the filesystem knows about the exec bit
                if util.statisexec(st):
                    return b'x'
            else:
                if entry is None:
                    entry = self.get_entry(x)
                # layer 2: fallback exec bit recorded in the dirstate
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                else:
                    # layer 3: reuse the parent-derived value if already computed
                    if fallback_value is None:
                        if not fallback_func:
                            fallback_func.append(buildfallback())
                        fallback_value = fallback_func[0](x)
                    if b'x' in fallback_value:
                        return b'x'
            return b''

        return get_flags
287 287
    @propertycache
    def _cwd(self):
        """current working directory, honoring the ui.forcecwd override"""
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
295 295
    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.

        Returns b'' when the cwd is the repo root, a root-relative path
        when inside the repo, and an absolute path when outside it.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
315 315
    def pathto(self, f, cwd=None):
        """return a display path for ``f`` relative to ``cwd`` (default: getcwd())"""
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        # honor the ui.slash setting for display on Windows
        if self._slash:
            return util.pconvert(path)
        return path
323 323
    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. we should
        consider migrating all user of this to going through the dirstate entry
        instead.
        """
        # deprecated accessor: warn callers toward get_entry()
        msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
        util.nouideprecwarn(msg, b'6.1', stacklevel=2)
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        return b'?'
344 344
345 345 def get_entry(self, path):
346 346 """return a DirstateItem for the associated path"""
347 347 entry = self._map.get(path)
348 348 if entry is None:
349 349 return DirstateItem()
350 350 return entry
351 351
    def __contains__(self, key):
        # membership test against the tracked-file map
        return key in self._map
354 354
    def __iter__(self):
        # iterate over tracked filenames in sorted order
        return iter(sorted(self._map))
357 357
    def items(self):
        """iterate over (filename, DirstateItem) pairs"""
        return pycompat.iteritems(self._map)

    # py2-style alias kept for compatibility
    iteritems = items
362 362
    def parents(self):
        """return the (validated) working-directory parent nodes"""
        return [self._validate(p) for p in self._pl]
365 365
    def p1(self):
        """return the (validated) first working-directory parent"""
        return self._validate(self._pl[0])
368 368
    def p2(self):
        """return the (validated) second working-directory parent"""
        return self._validate(self._pl[1])
371 371
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # a non-null second parent is what defines a merge
        return self._pl[1] != self._nodeconstants.nullid
376 376
    def branch(self):
        """return the current branch name in local encoding"""
        return encoding.tolocal(self._branch)
379 379
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries a
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        # enforce use of the parentchange() context manager
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        # remember the pre-change parents so callbacks can be notified on write
        if self._origpl is None:
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
405 405
    def setbranch(self, branch):
        """persist ``branch`` (local encoding) to .hg/branch"""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # discard the atomic temp file on any failure
            f.discard()
            raise
421 421
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so they are recomputed on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._dirty = False
        self._parentwriters = 0
        self._origpl = None
435 435
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        # a self-copy carries no information; ignore it
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
        else:
            self._map.copymap.pop(dest, None)
445 445
    def copied(self, file):
        """return the copy source of ``file``, or None if not a copy"""
        return self._map.copymap.get(file, None)
448 448
    def copies(self):
        """return the full dest -> source copy map"""
        return self._map.copymap
451 451
    @requires_no_parents_change
    def set_tracked(self, filename, reset_copy=False):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        if reset_copy is set, any existing copy information will be dropped.

        return True the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            # newly tracked: guard against dir/file name clashes
            self._check_new_tracked_filename(filename)
        pre_tracked = self._map.set_tracked(filename)
        if reset_copy:
            self._map.copymap.pop(filename, None)
        return pre_tracked
471 471
    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True the file was previously tracked, False otherwise.
        """
        ret = self._map.set_untracked(filename)
        # only mark dirty if something actually changed
        if ret:
            self._dirty = True
        return ret
485 485
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata):
        """record that the current state of the file on disk is known to be clean

        ``parentfiledata`` is a (mode, size, mtime) triple as produced by
        ``_get_filedata``.
        """
        self._dirty = True
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        (mode, size, mtime) = parentfiledata
        self._map.set_clean(filename, mode, size, mtime)
494 494
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._map.set_possibly_dirty(filename)
500 500
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
        )
540 540
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the direstates parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
577 577
    def _check_new_tracked_filename(self, filename):
        """abort if ``filename`` clashes with an existing tracked dir or file"""
        scmutil.checkfilename(filename)
        # the new file must not collide with a tracked directory of same name
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows: no ancestor directory of filename may be a tracked file
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
593 593
    def _get_filedata(self, filename):
        """return a (mode, size, mtime) triple from lstat-ing ``filename``"""
        s = os.lstat(self._join(filename))
        mode = s.st_mode
        size = s.st_size
        mtime = timestamp.mtime_of(s)
        return (mode, size, mtime)
601 601
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """discover the filesystem case of ``path`` and memoize it in ``storemap``"""
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded

        return folded
627 627
    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        """case-normalize ``path`` as a file, via the file fold map"""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                # path came from a disk walk: its case is already canonical
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded
639 639
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """case-normalize ``path`` as a file or directory"""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            # not a known file; maybe a known directory
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
655 655
    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        # normalization is only needed on case-insensitive filesystems
        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path
677 677
    def clear(self):
        """drop all dirstate entries and mark the dirstate dirty"""
        self._map.clear()
        self._dirty = True
681 681
    def rebuild(self, parent, allfiles, changedfiles=None):
        """rebuild dirstate entries for ``parent`` from ``allfiles``

        When ``changedfiles`` is given, only those entries are refreshed;
        otherwise the whole dirstate is rebuilt from scratch.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            self.clear()
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:

            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
721 721
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
729 729
730 730 def write(self, tr):
731 731 if not self._dirty:
732 732 return
733 733
734 734 filename = self._filename
735 735 if tr:
736 # 'dirstate.write()' is not only for writing in-memory
737 # changes out, but also for dropping ambiguous timestamp.
738 # delayed writing re-raise "ambiguous timestamp issue".
739 # See also the wiki page below for detail:
740 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
741
742 # record when mtime start to be ambiguous
743 now = timestamp.get_fs_now(self._opener)
744
745 736 # delay writing in-memory changes out
746 737 tr.addfilegenerator(
747 738 b'dirstate',
748 739 (self._filename,),
749 lambda f: self._writedirstate(tr, f, now=now),
740 lambda f: self._writedirstate(tr, f),
750 741 location=b'plain',
751 742 )
752 743 return
753 744
754 745 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
755 746 self._writedirstate(tr, st)
756 747
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
767 758
768 def _writedirstate(self, tr, st, now=None):
759 def _writedirstate(self, tr, st):
769 760 # notify callbacks about parents change
770 761 if self._origpl is not None and self._origpl != self._pl:
771 762 for c, callback in sorted(
772 763 pycompat.iteritems(self._plchangecallbacks)
773 764 ):
774 765 callback(self, self._origpl, self._pl)
775 766 self._origpl = None
776 767
777 if now is None:
778 # use the modification time of the newly created temporary file as the
779 # filesystem's notion of 'now'
780 now = timestamp.mtime_of(util.fstat(st))
781
782 self._map.write(tr, st, now)
768 self._map.write(tr, st)
783 769 self._dirty = False
784 770
785 771 def _dirignore(self, f):
786 772 if self._ignore(f):
787 773 return True
788 774 for p in pathutil.finddirs(f):
789 775 if self._ignore(p):
790 776 return True
791 777 return False
792 778
    def _ignorefiles(self):
        """return the list of ignore-file paths to consult (.hgignore + config)"""
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files
803 789
    def _ignorefileandline(self, f):
        """return (ignorefile, lineno, line) responsible for ignoring ``f``

        Returns (None, -1, b"") when no pattern matches. Subinclude files
        are followed breadth-first, each visited at most once.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the nested ignore file instead of matching it here
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
825 811
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description of an unsupported file type
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # local aliases for speed in the loop below
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside a subrepo (both lists sorted,
        # walked with two cursors)
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
960 946
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Pick the ignore predicates according to which categories the
        # caller wants reported.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Bind frequently used attributes and functions to locals: the
        # traversal loop below runs once per directory entry.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # Unreadable/vanished directories are reported via
                    # match.bad rather than aborting the whole walk.
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1148 1134
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status through the Rust fast path.

        Returns the same ``(lookup, scmutil.status)`` pair as the pure
        Python implementation. The caller handles ``rustmod.FallbackError``
        for configurations the Rust side does not support.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        # The tuple order here must match the Rust implementation exactly.
        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # Propagate any dirtiness reported by the Rust code so the dirstate
        # gets written back.
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            # Relay ignore-file warnings: tuples are syntax errors inside a
            # pattern file, bare items are unreadable pattern files.
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1226 1212
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # The parameters are rebound below, so keep the list-flags aside.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        # Get the time from the filesystem so we can disambiguate files that
        # appear modified in the present or future.
        try:
            mtime_boundary = timestamp.get_fs_now(self._opener)
        except OSError:
            # In largefiles or readonly context
            mtime_boundary = None

        if use_rust:
            try:
                res = self._rust_status(
                    match, listclean, listignored, listunknown
                )
                return res + (mtime_boundary,)
            except rustmod.FallbackError:
                # fall through to the pure Python implementation
                pass

        def noop(f):
            pass

        # Bind append methods (or no-ops for categories the caller did not
        # request) to locals: the walk loop below runs once per file.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        checklink = self._checklink
        copymap = self._map.copymap

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file is not tracked: it is either ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            t = dget(fn)
            mode = t.mode
            size = t.size

            if not st and t.tracked:
                dadd(fn)
            elif t.p2_info:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if not checklink and t.has_fallback_symlink:
                    # If the file system does not support symlink, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif not checkexec and t.has_fallback_exec:
                    # If the file system does not support exec bits, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                    # There might be a change in the future if for example the
                    # internal clock is off, but this is a case where the issues
                    # the user would face would be a lot worse and there is
                    # nothing we can really do.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status, mtime_boundary)
1372 1358
1373 1359 def matches(self, match):
1374 1360 """
1375 1361 return files in the dirstate (in whatever state) filtered by match
1376 1362 """
1377 1363 dmap = self._map
1378 1364 if rustmod is not None:
1379 1365 dmap = self._map._map
1380 1366
1381 1367 if match.always():
1382 1368 return dmap.keys()
1383 1369 files = match.files()
1384 1370 if match.isexact():
1385 1371 # fast path -- filter the other way around, since typically files is
1386 1372 # much smaller than dmap
1387 1373 return [f for f in files if f in dmap]
1388 1374 if match.prefix() and all(fn in dmap for fn in files):
1389 1375 # fast path -- all the values are known to be files, so just return
1390 1376 # that
1391 1377 return list(files)
1392 1378 return [f for f in dmap if match(f)]
1393 1379
1394 1380 def _actualfilename(self, tr):
1395 1381 if tr:
1396 1382 return self._pendingfilename
1397 1383 else:
1398 1384 return self._filename
1399 1385
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        The backup can later be restored with `restorebackup` or removed
        with `clearbackup`. `tr` is the active transaction, or None.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1438 1424
1439 1425 def restorebackup(self, tr, backupname):
1440 1426 '''Restore dirstate by backup file'''
1441 1427 # this "invalidate()" prevents "wlock.release()" from writing
1442 1428 # changes of dirstate out after restoring from backup file
1443 1429 self.invalidate()
1444 1430 filename = self._actualfilename(tr)
1445 1431 o = self._opener
1446 1432 if util.samefile(o.join(backupname), o.join(filename)):
1447 1433 o.unlink(backupname)
1448 1434 else:
1449 1435 o.rename(backupname, filename, checkambig=True)
1450 1436
1451 1437 def clearbackup(self, tr, backupname):
1452 1438 '''Clear backup file'''
1453 1439 self._opener.unlink(backupname)
1454 1440
1455 1441 def verify(self, m1, m2):
1456 1442 """check the dirstate content again the parent manifest and yield errors"""
1457 1443 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1458 1444 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1459 1445 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1460 1446 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1461 1447 for f, entry in self.items():
1462 1448 state = entry.state
1463 1449 if state in b"nr" and f not in m1:
1464 1450 yield (missing_from_p1, f, state)
1465 1451 if state in b"a" and f in m1:
1466 1452 yield (unexpected_in_p1, f, state)
1467 1453 if state in b"m" and f not in m1 and f not in m2:
1468 1454 yield (missing_from_ps, f, state)
1469 1455 for f in m1:
1470 1456 state = self.get_entry(f).state
1471 1457 if state not in b"nrm":
1472 1458 yield (missing_from_ds, f, state)
@@ -1,732 +1,732 b''
1 1 # dirstatemap.py
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 from __future__ import absolute_import
7 7
8 8 import errno
9 9
10 10 from .i18n import _
11 11
12 12 from . import (
13 13 error,
14 14 pathutil,
15 15 policy,
16 16 pycompat,
17 17 txnutil,
18 18 util,
19 19 )
20 20
21 21 from .dirstateutils import (
22 22 docket as docketmod,
23 23 v2,
24 24 )
25 25
26 26 parsers = policy.importmod('parsers')
27 27 rustmod = policy.importrust('dirstate')
28 28
29 29 propertycache = util.propertycache
30 30
31 31 if rustmod is None:
32 32 DirstateItem = parsers.DirstateItem
33 33 else:
34 34 DirstateItem = rustmod.DirstateItem
35 35
36 36 rangemask = 0x7FFFFFFF
37 37
38 38
class _dirstatemapcommon(object):
    """
    Methods that are identical for both implementations of the dirstatemap
    class, with and without Rust extensions enabled.
    """

    # please pytype

    _map = None
    copymap = None

    def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._ui = ui
        self._opener = opener
        self._root = root
        self._filename = b'dirstate'
        self._nodelen = 20  # Also update Rust code when changing this!
        self._parents = None
        self._dirtyparents = False
        self._docket = None

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None

    def preload(self):
        """Loads the underlying data, if it's not already loaded"""
        self._map

    def get(self, key, default=None):
        """Return the entry for `key`, or `default` when there is none."""
        return self._map.get(key, default)

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def __contains__(self, key):
        return key in self._map

    def __getitem__(self, item):
        return self._map[item]

    ### sub-class utility method
    #
    # Use to allow for generic implementation of some method while still coping
    # with minor difference between implementation.

    def _dirs_incr(self, filename, old_entry=None):
        """increment the dirstate counter if applicable

        This might be a no-op for some subclass who deal with directory
        tracking in a different way.
        """

    def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
        """decrement the dirstate counter if applicable

        This might be a no-op for some subclass who deal with directory
        tracking in a different way.
        """

    def _refresh_entry(self, f, entry):
        """record updated state of an entry"""

    def _insert_entry(self, f, entry):
        """add a new dirstate entry (or replace an unrelated one)

        The fact it is actually new is the responsibility of the caller
        """

    def _drop_entry(self, f):
        """remove any entry for file f

        This should also drop associated copy information

        The fact we actually need to drop it is the responsibility of the caller"""

    ### method to manipulate the entries

    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        entry = self[filename]
        entry.set_possibly_dirty()
        self._refresh_entry(filename, entry)

    def set_clean(self, filename, mode, size, mtime):
        """mark a file as back to a clean state"""
        entry = self[filename]
        # sizes are stored truncated to 31 bits in the dirstate
        size = size & rangemask
        entry.set_clean(mode, size, mtime)
        self._refresh_entry(filename, entry)
        # a clean file cannot carry pending copy information
        self.copymap.pop(filename, None)

    def set_tracked(self, filename):
        """start tracking `filename`; return True if it was untracked before"""
        new = False
        entry = self.get(filename)
        if entry is None:
            # previously unknown file: create a fresh entry
            self._dirs_incr(filename)
            entry = DirstateItem(
                wc_tracked=True,
            )

            self._insert_entry(filename, entry)
            new = True
        elif not entry.tracked:
            self._dirs_incr(filename, entry)
            entry.set_tracked()
            self._refresh_entry(filename, entry)
            new = True
        else:
            # XXX This is probably overkill for more case, but we need this to
            # fully replace the `normallookup` call with `set_tracked` one.
            # Consider smoothing this in the future.
            entry.set_possibly_dirty()
            self._refresh_entry(filename, entry)
        return new

    def set_untracked(self, f):
        """Mark a file as no longer tracked in the dirstate map"""
        entry = self.get(f)
        if entry is None:
            return False
        else:
            self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
            if not entry.p2_info:
                self.copymap.pop(f, None)
            entry.set_untracked()
            self._refresh_entry(f, entry)
            return True

    def reset_state(
        self,
        filename,
        wc_tracked=False,
        p1_tracked=False,
        p2_info=False,
        has_meaningful_mtime=True,
        has_meaningful_data=True,
        parentfiledata=None,
    ):
        """Set an entry to a given state, disregarding all previous state

        This is to be used by the part of the dirstate API dedicated to
        adjusting the dirstate after a update/merge.

        note: calling this might result to no entry existing at all if the
        dirstate map does not see any point at having one for this file
        anymore.
        """
        # copy information are now outdated
        # (maybe new information should be in directly passed to this function)
        self.copymap.pop(filename, None)

        if not (p1_tracked or p2_info or wc_tracked):
            # nothing interesting left to record: drop the entry entirely
            old_entry = self._map.get(filename)
            self._drop_entry(filename)
            self._dirs_decr(filename, old_entry=old_entry)
            return

        old_entry = self._map.get(filename)
        self._dirs_incr(filename, old_entry)
        entry = DirstateItem(
            wc_tracked=wc_tracked,
            p1_tracked=p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=has_meaningful_mtime,
            parentfiledata=parentfiledata,
        )
        self._insert_entry(filename, entry)

    ### disk interaction

    def _opendirstatefile(self):
        """Open the dirstate file, honoring a pending transaction file.

        The pending/non-pending choice must stay consistent for the
        lifetime of this object; otherwise we abort.
        """
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(
                _(b'working directory state may be changed parallelly')
            )
        self._pendingmode = mode
        return fp

    def _readdirstatefile(self, size=-1):
        """Return up to `size` bytes of the dirstate file (b'' if missing)."""
        try:
            with self._opendirstatefile() as fp:
                return fp.read(size)
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            # File doesn't exist, so the current state is empty
            return b''

    @property
    def docket(self):
        # The docket only exists for dirstate-v2, where `.hg/dirstate` is a
        # small pointer file referencing a separate data file.
        if not self._docket:
            if not self._use_dirstate_v2:
                raise error.ProgrammingError(
                    b'dirstate only has a docket in v2 format'
                )
            self._docket = docketmod.DirstateDocket.parse(
                self._readdirstatefile(), self._nodeconstants
            )
        return self._docket

    def write_v2_no_append(self, tr, st, meta, packed):
        """Write a full dirstate-v2: a fresh data file plus a new docket.

        `st` is the open (atomictemp) docket file object, `packed` the
        serialized dirstate data and `meta` its tree metadata.
        """
        old_docket = self.docket
        new_docket = docketmod.DirstateDocket.with_new_uuid(
            self.parents(), len(packed), meta
        )
        data_filename = new_docket.data_filename()
        if tr:
            tr.add(data_filename, 0)
        self._opener.write(data_filename, packed)
        # Write the new docket after the new data file has been
        # written. Because `st` was opened with `atomictemp=True`,
        # the actual `.hg/dirstate` file is only affected on close.
        st.write(new_docket.serialize())
        st.close()
        # Remove the old data file after the new docket pointing to
        # the new data file was written.
        if old_docket.uuid:
            data_filename = old_docket.data_filename()
            unlink = lambda _tr=None: self._opener.unlink(data_filename)
            if tr:
                category = b"dirstate-v2-clean-" + old_docket.uuid
                tr.addpostclose(category, unlink)
            else:
                unlink()
        self._docket = new_docket

    ### reading/setting parents

    def parents(self):
        """Return the (p1, p2) pair of working-copy parent nodes."""
        if not self._parents:
            if self._use_dirstate_v2:
                self._parents = self.docket.parents
            else:
                # v1: the parents are the first two nodes of the file
                read_len = self._nodelen * 2
                st = self._readdirstatefile(read_len)
                l = len(st)
                if l == read_len:
                    self._parents = (
                        st[: self._nodelen],
                        st[self._nodelen : 2 * self._nodelen],
                    )
                elif l == 0:
                    # missing or empty file: null parents
                    self._parents = (
                        self._nodeconstants.nullid,
                        self._nodeconstants.nullid,
                    )
                else:
                    raise error.Abort(
                        _(b'working directory state appears damaged!')
                    )

        return self._parents
298 298
299 299
300 300 class dirstatemap(_dirstatemapcommon):
301 301 """Map encapsulating the dirstate's contents.
302 302
303 303 The dirstate contains the following state:
304 304
305 305 - `identity` is the identity of the dirstate file, which can be used to
306 306 detect when changes have occurred to the dirstate file.
307 307
308 308 - `parents` is a pair containing the parents of the working copy. The
309 309 parents are updated by calling `setparents`.
310 310
311 311 - the state map maps filenames to tuples of (state, mode, size, mtime),
312 312 where state is a single character representing 'normal', 'added',
313 313 'removed', or 'merged'. It is read by treating the dirstate as a
314 314 dict. File state is updated by calling various methods (see each
315 315 documentation for details):
316 316
317 317 - `reset_state`,
318 318 - `set_tracked`
319 319 - `set_untracked`
320 320 - `set_clean`
321 321 - `set_possibly_dirty`
322 322
323 323 - `copymap` maps destination filenames to their source filename.
324 324
325 325 The dirstate also provides the following views onto the state:
326 326
327 327 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
328 328 form that they appear as in the dirstate.
329 329
330 330 - `dirfoldmap` is a dict mapping normalized directory names to the
331 331 denormalized form that they appear as in the dirstate.
332 332 """
333 333
334 334 ### Core data storage and access
335 335
    @propertycache
    def _map(self):
        # Assign the dict before calling read(): read() fills this very
        # attribute in place, and the propertycache then keeps the
        # populated result.
        self._map = {}
        self.read()
        return self._map
341 341
    @propertycache
    def copymap(self):
        # Touching self._map triggers read(), which also fills this dict
        # with the copy-source information from disk.
        self.copymap = {}
        self._map
        return self.copymap
347 347
348 348 def clear(self):
349 349 self._map.clear()
350 350 self.copymap.clear()
351 351 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
352 352 util.clearcachedproperty(self, b"_dirs")
353 353 util.clearcachedproperty(self, b"_alldirs")
354 354 util.clearcachedproperty(self, b"filefoldmap")
355 355 util.clearcachedproperty(self, b"dirfoldmap")
356 356
    def items(self):
        """Iterate over (filename, DirstateItem) pairs."""
        return pycompat.iteritems(self._map)

    # forward for python2,3 compat
    iteritems = items
362 362
363 363 def debug_iter(self, all):
364 364 """
365 365 Return an iterator of (filename, state, mode, size, mtime) tuples
366 366
367 367 `all` is unused when Rust is not enabled
368 368 """
369 369 for (filename, item) in self.items():
370 370 yield (filename, item.state, item.mode, item.size, item.mtime)
371 371
    def keys(self):
        """Return a view over all filenames present in the map."""
        return self._map.keys()
374 374
375 375 ### reading/setting parents
376 376
377 377 def setparents(self, p1, p2, fold_p2=False):
378 378 self._parents = (p1, p2)
379 379 self._dirtyparents = True
380 380 copies = {}
381 381 if fold_p2:
382 382 for f, s in pycompat.iteritems(self._map):
383 383 # Discard "merged" markers when moving away from a merge state
384 384 if s.p2_info:
385 385 source = self.copymap.pop(f, None)
386 386 if source:
387 387 copies[f] = source
388 388 s.drop_merge_data()
389 389 return copies
390 390
391 391 ### disk interaction
392 392
    def read(self):
        """Load the on-disk dirstate into `_map` and `copymap`."""
        # ignore HG_PENDING because identity is used only for writing
        self.identity = util.filestat.frompath(
            self._opener.join(self._filename)
        )

        if self._use_dirstate_v2:
            if not self.docket.uuid:
                # no data file referenced yet: nothing to load
                return
            st = self._opener.read(self.docket.data_filename())
        else:
            st = self._readdirstatefile()

        if not st:
            return

        # TODO: adjust this estimate for dirstate-v2
        if util.safehasattr(parsers, b'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. This trades wasting some memory for avoiding costly
            # resizes. Each entry have a prefix of 17 bytes followed by one or
            # two path names. Studies on various large-scale real-world repositories
            # found 54 bytes a reasonable upper limit for the average path names.
            # Copy entries are ignored for the sake of this estimate.
            self._map = parsers.dict_new_presized(len(st) // 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        if self._use_dirstate_v2:
            p = self.docket.parents
            meta = self.docket.tree_metadata
            parse_dirstate = util.nogc(v2.parse_dirstate)
            parse_dirstate(self._map, self.copymap, st, meta)
        else:
            parse_dirstate = util.nogc(parsers.parse_dirstate)
            p = parse_dirstate(self._map, self.copymap, st)
        if not self._dirtyparents:
            self.setparents(*p)

        # Avoid excess attribute lookups by fast pathing certain checks
        self.__contains__ = self._map.__contains__
        self.__getitem__ = self._map.__getitem__
        self.get = self._map.get
446 446
def write(self, tr, st):
    """Serialize the dirstate into ``st``.

    v1 packs everything into the single dirstate file; v2 delegates to
    ``write_v2_no_append`` which also rewrites the data file.  Marks the
    parents as clean once the write is done.
    """
    if not self._use_dirstate_v2:
        data = parsers.pack_dirstate(
            self._map, self.copymap, self.parents()
        )
        st.write(data)
        st.close()
    else:
        data, meta = v2.pack_dirstate(self._map, self.copymap)
        self.write_v2_no_append(tr, st, meta, data)
    self._dirtyparents = False
458 458
@propertycache
def identity(self):
    # Touching ``self._map`` forces the dirstate to be loaded, which
    # (see ``read`` above) assigns ``self.identity`` as a side effect —
    # presumably replacing this cached property before we return it.
    self._map
    return self.identity
463 463
464 464 ### code related to maintaining and accessing "extra" property
465 465 # (e.g. "has_dir")
466 466
467 467 def _dirs_incr(self, filename, old_entry=None):
468 468 """incremente the dirstate counter if applicable"""
469 469 if (
470 470 old_entry is None or old_entry.removed
471 471 ) and "_dirs" in self.__dict__:
472 472 self._dirs.addpath(filename)
473 473 if old_entry is None and "_alldirs" in self.__dict__:
474 474 self._alldirs.addpath(filename)
475 475
476 476 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
477 477 """decremente the dirstate counter if applicable"""
478 478 if old_entry is not None:
479 479 if "_dirs" in self.__dict__ and not old_entry.removed:
480 480 self._dirs.delpath(filename)
481 481 if "_alldirs" in self.__dict__ and not remove_variant:
482 482 self._alldirs.delpath(filename)
483 483 elif remove_variant and "_alldirs" in self.__dict__:
484 484 self._alldirs.addpath(filename)
485 485 if "filefoldmap" in self.__dict__:
486 486 normed = util.normcase(filename)
487 487 self.filefoldmap.pop(normed, None)
488 488
@propertycache
def filefoldmap(self):
    """Map each normalized-case path to its stored (non-normalized) form.

    Uses the C helper ``parsers.make_file_foldmap`` when the loaded
    parsers module provides it, falling back to a pure-Python build over
    all entries that are not removed.
    """
    maker = getattr(parsers, 'make_file_foldmap', None)
    if maker is not None:
        return maker(self._map, util.normcasespec, util.normcasefallback)

    normcase = util.normcase
    foldmap = {
        normcase(name): name
        for name, item in pycompat.iteritems(self._map)
        if not item.removed
    }
    foldmap[b'.'] = b'.'  # prevents useless util.fspath() invocation
    return foldmap
510 510
@propertycache
def dirfoldmap(self):
    """Map each tracked directory's normalized-case path to its stored form."""
    normcase = util.normcase
    return {normcase(name): name for name in self._dirs}
518 518
def hastrackeddir(self, d):
    """Report whether directory ``d`` contains at least one tracked
    (i.e. not removed) file in the dirstate.
    """
    return d in self._dirs
525 525
def hasdir(self, d):
    """Report whether directory ``d`` contains any file at all in the
    dirstate, whether tracked or removed.
    """
    return d in self._alldirs
532 532
@propertycache
def _dirs(self):
    # Lazily-built directory structure restricted to tracked files
    # (backs ``hastrackeddir``); kept current by ``_dirs_incr``/``_dirs_decr``.
    return pathutil.dirs(self._map, only_tracked=True)
536 536
@propertycache
def _alldirs(self):
    # Lazily-built directory structure over every entry, tracked or removed
    # (backs ``hasdir``); kept current by ``_dirs_incr``/``_dirs_decr``.
    return pathutil.dirs(self._map)
540 540
541 541 ### code related to manipulation of entries and copy-sources
542 542
543 543 def _refresh_entry(self, f, entry):
544 544 if not entry.any_tracked:
545 545 self._map.pop(f, None)
546 546
547 547 def _insert_entry(self, f, entry):
548 548 self._map[f] = entry
549 549
550 550 def _drop_entry(self, f):
551 551 self._map.pop(f, None)
552 552 self.copymap.pop(f, None)
553 553
554 554
if rustmod is not None:

    class dirstatemap(_dirstatemapcommon):
        """Dirstate map backed by the Rust extension (``rustmod.DirstateMap``).

        Presents the same interface as the pure-Python implementation but
        delegates storage and most queries to the Rust map object.
        """

        ### Core data storage and access

        @propertycache
        def _map(self):
            """
            Fills the Dirstatemap when called.
            """
            # ignore HG_PENDING because identity is used only for writing
            self.identity = util.filestat.frompath(
                self._opener.join(self._filename)
            )

            if self._use_dirstate_v2:
                # v2: the docket points at a separate data file; no uuid
                # means there is no data file yet.
                if self.docket.uuid:
                    # TODO: use mmap when possible
                    data = self._opener.read(self.docket.data_filename())
                else:
                    data = b''
                self._map = rustmod.DirstateMap.new_v2(
                    data, self.docket.data_size, self.docket.tree_metadata
                )
                parents = self.docket.parents
            else:
                self._map, parents = rustmod.DirstateMap.new_v1(
                    self._readdirstatefile()
                )

            # Do not clobber parents explicitly set since the last write.
            if parents and not self._dirtyparents:
                self.setparents(*parents)

            # Avoid excess attribute lookups by fast pathing certain checks
            self.__contains__ = self._map.__contains__
            self.__getitem__ = self._map.__getitem__
            self.get = self._map.get
            return self._map

        @property
        def copymap(self):
            # copy sources live inside the Rust map object
            return self._map.copymap()

        def debug_iter(self, all):
            """
            Return an iterator of (filename, state, mode, size, mtime) tuples

            `all`: also include with `state == b' '` dirstate tree nodes that
            don't have an associated `DirstateItem`.

            """
            return self._map.debug_iter(all)

        def clear(self):
            # Reset the map and invalidate every derived cached property.
            self._map.clear()
            self.setparents(
                self._nodeconstants.nullid, self._nodeconstants.nullid
            )
            util.clearcachedproperty(self, b"_dirs")
            util.clearcachedproperty(self, b"_alldirs")
            util.clearcachedproperty(self, b"dirfoldmap")

        def items(self):
            return self._map.items()

        # forward for python2,3 compat
        iteritems = items

        def keys(self):
            return iter(self._map)

        ### reading/setting parents

        def setparents(self, p1, p2, fold_p2=False):
            # Same contract as the pure-Python version: record the parents
            # and, when folding p2, strip merge data and return the copy
            # sources that were dropped in the process.
            self._parents = (p1, p2)
            self._dirtyparents = True
            copies = {}
            if fold_p2:
                # Collect into an intermediate list to avoid a `RuntimeError`
                # exception due to mutation during iteration.
                # TODO: move this the whole loop to Rust where `iter_mut`
                # enables in-place mutation of elements of a collection while
                # iterating it, without mutating the collection itself.
                files_with_p2_info = [
                    f for f, s in self._map.items() if s.p2_info
                ]
                rust_map = self._map
                for f in files_with_p2_info:
                    e = rust_map.get(f)
                    source = self.copymap.pop(f, None)
                    if source:
                        copies[f] = source
                    e.drop_merge_data()
                    # write the mutated item back into the Rust map
                    rust_map.set_dirstate_item(f, e)
            return copies

        ### disk interaction

        @propertycache
        def identity(self):
            # Touching ``self._map`` loads the dirstate, which assigns
            # ``self.identity`` as a side effect (see ``_map`` above).
            self._map
            return self.identity

        def write(self, tr, st):
            """Serialize the dirstate to ``st``.

            v1 rewrites the single dirstate file.  v2 appends to the
            existing data file when possible, otherwise rewrites it, and
            then writes an updated docket to ``st``.
            """
            if not self._use_dirstate_v2:
                p1, p2 = self.parents()
                packed = self._map.write_v1(p1, p2)
                st.write(packed)
                st.close()
                self._dirtyparents = False
                return

            # We can only append to an existing data file if there is one
            can_append = self.docket.uuid is not None
            packed, meta, append = self._map.write_v2(can_append)
            if append:
                docket = self.docket
                data_filename = docket.data_filename()
                if tr:
                    # record the pre-append size so the transaction can
                    # truncate back to it on rollback
                    tr.add(data_filename, docket.data_size)
                with self._opener(data_filename, b'r+b') as fp:
                    fp.seek(docket.data_size)
                    assert fp.tell() == docket.data_size
                    written = fp.write(packed)
                    if written is not None:  # py2 may return None
                        assert written == len(packed), (written, len(packed))
                docket.data_size += len(packed)
                docket.parents = self.parents()
                docket.tree_metadata = meta
                st.write(docket.serialize())
                st.close()
            else:
                self.write_v2_no_append(tr, st, meta, packed)
            # Reload from the newly-written file
            util.clearcachedproperty(self, b"_map")
            self._dirtyparents = False

        ### code related to maintaining and accessing "extra" property
        # (e.g. "has_dir")

        @propertycache
        def filefoldmap(self):
            """Returns a dictionary mapping normalized case paths to their
            non-normalized versions.
            """
            return self._map.filefoldmapasdict()

        def hastrackeddir(self, d):
            # True when directory ``d`` holds at least one tracked file
            return self._map.hastrackeddir(d)

        def hasdir(self, d):
            # True when directory ``d`` holds any file, tracked or removed
            return self._map.hasdir(d)

        @propertycache
        def dirfoldmap(self):
            # normalized-case -> stored form, for tracked directories only
            f = {}
            normcase = util.normcase
            for name in self._map.tracked_dirs():
                f[normcase(name)] = name
            return f

        ### code related to manipulation of entries and copy-sources

        def _refresh_entry(self, f, entry):
            # Drop the entry (and copy source) once nothing tracks it,
            # otherwise push the updated item back into the Rust map.
            if not entry.any_tracked:
                self._map.drop_item_and_copy_source(f)
            else:
                self._map.addfile(f, entry)

        def _insert_entry(self, f, entry):
            self._map.addfile(f, entry)

        def _drop_entry(self, f):
            self._map.drop_item_and_copy_source(f)

        def __setitem__(self, key, value):
            assert isinstance(value, DirstateItem)
            self._map.set_dirstate_item(key, value)
General Comments 0
You need to be logged in to leave comments. Login now