##// END OF EJS Templates
dirstate: add missing return on platforms without exec or symlink...
Raphaël Gomès -
r49102:8f54d9c7 default
parent child Browse files
Show More
@@ -1,1526 +1,1528 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .dirstateutils import (
35 35 timestamp,
36 36 )
37 37
38 38 from .interfaces import (
39 39 dirstate as intdirstate,
40 40 util as interfaceutil,
41 41 )
42 42
# Optional native accelerators; these are None when the extension is absent.
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# True when the Rust extension (with dirstate-v2 support) is available.
HAS_FAST_DIRSTATE_V2 = rustmod is not None

# Short local aliases used throughout this module.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = dirstatemap.DirstateItem
53 53
54 54
class repocache(filecache):
    """A ``filecache`` specialized for files that live under ``.hg/``."""

    def join(self, obj, fname):
        # Resolve the cached file's path through the repository's opener.
        joined = obj._opener.join(fname)
        return joined
60 60
61 61
class rootcache(filecache):
    """A ``filecache`` specialized for files in the repository root."""

    def join(self, obj, fname):
        # Resolve relative to the working-directory root.
        joined = obj._join(fname)
        return joined
67 67
68 68
def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    # Create (and immediately discard) a scratch file so the mtime we read
    # reflects the filesystem's own clock and timestamp granularity rather
    # than the system clock.
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return timestamp.mtime_of(os.fstat(tmpfd))
    finally:
        # Always clean up the scratch file, even if fstat fails.
        os.close(tmpfd)
        vfs.unlink(tmpname)
77 77
78 78
def requires_parents_change(func):
    """Decorator enforcing that ``func`` runs inside a parentchange context.

    The wrapped method raises ``error.ProgrammingError`` when called while
    no parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
88 88
89 89
def requires_no_parents_change(func):
    """Decorator enforcing that ``func`` runs outside a parentchange context.

    The wrapped method raises ``error.ProgrammingError`` when called while
    a parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
99 99
100 100
101 101 @interfaceutil.implementer(intdirstate.idirstate)
102 102 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when in-memory state differs from what is on disk.
        self._dirty = False
        self._lastnormaltime = timestamp.zero()
        self._ui = ui
        self._filecache = {}
        # Depth of nested parentchange() contexts currently open.
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        # category -> callback, invoked when the parents change.
        self._plchangecallbacks = {}
        # Parents as of the start of the current change (for callbacks).
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
142 142
143 143 def prefetch_parents(self):
144 144 """make sure the parents are loaded
145 145
146 146 Used to avoid a race condition.
147 147 """
148 148 self._pl
149 149
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
166 166
167 167 def pendingparentchange(self):
168 168 """Returns true if the dirstate is in the middle of a set of changes
169 169 that modify the dirstate parent.
170 170 """
171 171 return self._parentwriters > 0
172 172
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # Assigning to self._map before returning makes the map reachable
        # even while the constructor below is still running; propertycache
        # then keeps the instance cached for later accesses.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
184 184
185 185 @property
186 186 def _sparsematcher(self):
187 187 """The matcher for the sparse checkout.
188 188
189 189 The working directory may not include every file from a manifest. The
190 190 matcher obtained by this property will match a path if it is to be
191 191 included in the working directory.
192 192 """
193 193 # TODO there is potential to cache this property. For now, the matcher
194 194 # is resolved on every access. (But the called function does use a
195 195 # cache to keep the lookup fast.)
196 196 return self._sparsematchfn()
197 197
198 198 @repocache(b'branch')
199 199 def _branch(self):
200 200 try:
201 201 return self._opener.read(b"branch").strip() or b"default"
202 202 except IOError as inst:
203 203 if inst.errno != errno.ENOENT:
204 204 raise
205 205 return b"default"
206 206
    @property
    def _pl(self):
        # The working copy's parent pair, as stored by the dirstate map.
        return self._map.parents()
210 210
    def hasdir(self, d):
        """Return True if ``d`` is a directory containing tracked files."""
        return self._map.hastrackeddir(d)
213 213
214 214 @rootcache(b'.hgignore')
215 215 def _ignore(self):
216 216 files = self._ignorefiles()
217 217 if not files:
218 218 return matchmod.never()
219 219
220 220 pats = [b'include:%s' % f for f in files]
221 221 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
222 222
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' even though the OS
        # separator differs (controlled by the ui.slash config knob).
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
226 226
    @propertycache
    def _checklink(self):
        # Whether the filesystem at the repo root supports symlinks.
        return util.checklink(self._root)
230 230
    @propertycache
    def _checkexec(self):
        # Whether the filesystem at the repo root honors the exec bit.
        return bool(util.checkexec(self._root))
234 234
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed against '.hg').
        return not util.fscasesensitive(self._join(b'.hg'))
238 238
    def _join(self, f):
        """Return ``f`` joined to the repository root.

        Much faster than os.path.join(); it's safe because f is always a
        relative path.
        """
        return self._rootdir + f
243 243
    def flagfunc(self, buildfallback):
        """Return a callable mapping a path to its flags (b'l', b'x' or b'').

        Four variants are built depending on which of symlinks and the exec
        bit the filesystem supports.  ``buildfallback`` constructs a
        (possibly expensive) fallback used only when the filesystem cannot
        answer directly.
        """
        if self._checklink and self._checkexec:
            # Best case: the filesystem can answer both questions itself.

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # Symlinks are real; the exec bit comes from dirstate fallback
            # data or, failing that, the fallback function.

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                entry = self.get_entry(x)
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                elif b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # Exec bit is real; symlink-ness comes from fallback data or
            # the fallback function.

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # Neither is supported: rely entirely on fallback data stored
            # in the dirstate entry, then on the fallback function.

            def f(x):
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                elif entry.has_fallback_symlink:
                    # Fallback data says "not a symlink" and nothing about
                    # exec: treat as plain file without consulting fallback.
                    return b''
                return fallback(x)

            return f
304
    @propertycache
    def _cwd(self):
        """Current working directory, overridable via ui.forcecwd."""
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
310 312
311 313 def getcwd(self):
312 314 """Return the path from which a canonical path is calculated.
313 315
314 316 This path should be used to resolve file patterns or to convert
315 317 canonical paths back to file paths for display. It shouldn't be
316 318 used to get real file paths. Use vfs functions instead.
317 319 """
318 320 cwd = self._cwd
319 321 if cwd == self._root:
320 322 return b''
321 323 # self._root ends with a path separator if self._root is '/' or 'C:\'
322 324 rootsep = self._root
323 325 if not util.endswithsep(rootsep):
324 326 rootsep += pycompat.ossep
325 327 if cwd.startswith(rootsep):
326 328 return cwd[len(rootsep) :]
327 329 else:
328 330 # we're outside the repo. return an absolute path.
329 331 return cwd
330 332
331 333 def pathto(self, f, cwd=None):
332 334 if cwd is None:
333 335 cwd = self.getcwd()
334 336 path = util.pathto(self._root, cwd, f)
335 337 if self._slash:
336 338 return util.pconvert(path)
337 339 return path
338 340
    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. we should
        consider migrating all user of this to going through the dirstate entry
        instead.
        """
        # Deprecated access path; callers should use get_entry() instead.
        msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
        util.nouideprecwarn(msg, b'6.1', stacklevel=2)
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        return b'?'
359 361
360 362 def get_entry(self, path):
361 363 """return a DirstateItem for the associated path"""
362 364 entry = self._map.get(path)
363 365 if entry is None:
364 366 return DirstateItem()
365 367 return entry
366 368
    def __contains__(self, key):
        # Membership mirrors the underlying dirstate map.
        return key in self._map
369 371
    def __iter__(self):
        # Iterate over tracked filenames in sorted order.
        return iter(sorted(self._map))
372 374
    def items(self):
        """Iterate over (filename, DirstateItem) pairs."""
        return pycompat.iteritems(self._map)

    # Python 2 style alias, kept for compatibility with existing callers.
    iteritems = items
377 379
378 380 def parents(self):
379 381 return [self._validate(p) for p in self._pl]
380 382
381 383 def p1(self):
382 384 return self._validate(self._pl[0])
383 385
384 386 def p2(self):
385 387 return self._validate(self._pl[1])
386 388
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # A non-null second parent is the definition of "merging".
        return self._pl[1] != self._nodeconstants.nullid
391 393
    def branch(self):
        # Branch name converted to the local encoding for display.
        return encoding.tolocal(self._branch)
394 396
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        # Guard: parents may only change inside a parentchange() context.
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        # Remember the original parents so change callbacks can compare.
        if self._origpl is None:
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
420 422
    def setbranch(self, branch):
        """Persist ``branch`` (local encoding) to the .hg/branch file."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # Discard the partially written temp file before propagating.
            f.discard()
            raise
436 438
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # Drop the cached properties so they are recomputed on next access.
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = timestamp.zero()
        self._dirty = False
        self._parentwriters = 0
        self._origpl = None
451 453
452 454 def copy(self, source, dest):
453 455 """Mark dest as a copy of source. Unmark dest if source is None."""
454 456 if source == dest:
455 457 return
456 458 self._dirty = True
457 459 if source is not None:
458 460 self._map.copymap[dest] = source
459 461 else:
460 462 self._map.copymap.pop(dest, None)
461 463
462 464 def copied(self, file):
463 465 return self._map.copymap.get(file, None)
464 466
    def copies(self):
        # The raw dest -> source copy mapping.
        return self._map.copymap
467 469
468 470 @requires_no_parents_change
469 471 def set_tracked(self, filename):
470 472 """a "public" method for generic code to mark a file as tracked
471 473
472 474 This function is to be called outside of "update/merge" case. For
473 475 example by a command like `hg add X`.
474 476
475 477 return True the file was previously untracked, False otherwise.
476 478 """
477 479 self._dirty = True
478 480 entry = self._map.get(filename)
479 481 if entry is None or not entry.tracked:
480 482 self._check_new_tracked_filename(filename)
481 483 return self._map.set_tracked(filename)
482 484
483 485 @requires_no_parents_change
484 486 def set_untracked(self, filename):
485 487 """a "public" method for generic code to mark a file as untracked
486 488
487 489 This function is to be called outside of "update/merge" case. For
488 490 example by a command like `hg remove X`.
489 491
490 492 return True the file was previously tracked, False otherwise.
491 493 """
492 494 ret = self._map.set_untracked(filename)
493 495 if ret:
494 496 self._dirty = True
495 497 return ret
496 498
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # No caller-supplied stat data: read it from disk now.
            (mode, size, mtime) = self._get_filedata(filename)
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        self._map.set_clean(filename, mode, size, mtime)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
513 515
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._map.set_possibly_dirty(filename)
519 521
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        parentfiledata = None
        if wc_tracked and p1_tracked:
            # Only files clean in both places get fresh stat data recorded.
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
572 574
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the direstates parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True

        # Stat data is only meaningful for files clean against p1 with no
        # p2 involvement and no dirtiness suspicion.
        need_parent_file_data = (
            not possibly_dirty and not p2_info and wc_tracked and p1_tracked
        )

        if need_parent_file_data and parentfiledata is None:
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
624 626
    def _check_new_tracked_filename(self, filename):
        """Abort if ``filename`` cannot be newly tracked.

        Rejects invalid names, names shadowed by a tracked directory, and
        names whose ancestor directory clashes with a tracked file.
        """
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                # Once an ancestor is a tracked directory, no file can
                # clash further up; stop scanning.
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
640 642
641 643 def _get_filedata(self, filename):
642 644 """returns"""
643 645 s = os.lstat(self._join(filename))
644 646 mode = s.st_mode
645 647 size = s.st_size
646 648 mtime = timestamp.mtime_of(s)
647 649 return (mode, size, mtime)
648 650
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Recover the on-disk case of ``path`` and cache it in ``storemap``.

        ``normed`` is the case-normalized form of ``path``; ``exists`` may be
        passed by the caller to skip the lexists() probe.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # Only existing paths are cached; missing ones may appear later
            # with different case.
            storemap[normed] = folded

        return folded
674 676
675 677 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
676 678 normed = util.normcase(path)
677 679 folded = self._map.filefoldmap.get(normed, None)
678 680 if folded is None:
679 681 if isknown:
680 682 folded = path
681 683 else:
682 684 folded = self._discoverpath(
683 685 path, normed, ignoremissing, exists, self._map.filefoldmap
684 686 )
685 687 return folded
686 688
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Return the canonical-case form of a file or directory path."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            # Not a known file; maybe a known directory.
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
702 704
    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        # Case-sensitive filesystems need no normalization at all.
        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path
724 726
    def clear(self):
        """Empty the dirstate map and mark the dirstate dirty."""
        self._map.clear()
        self._lastnormaltime = timestamp.zero()
        self._dirty = True
729 731
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild dirstate entries against ``parent``.

        When ``changedfiles`` is None, the whole dirstate is rebuilt from
        ``allfiles``; otherwise only the named files are refreshed or
        dropped.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the reset.
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:

            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
771 773
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
779 781
    def write(self, tr):
        """Write the dirstate to disk, possibly delayed through ``tr``."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # record when mtime start to be ambiguous
            now = _getfsnow(self._opener)

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f, now=now),
                location=b'plain',
            )
            return

        # No transaction: write synchronously with ambiguity detection.
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
806 808
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
817 819
    def _writedirstate(self, tr, st, now=None):
        """Serialize the dirstate to the open file ``st``.

        ``now`` is the filesystem's current time, computed from ``st``'s
        own mtime when not supplied.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None

        if now is None:
            # use the modification time of the newly created temporary file as the
            # filesystem's notion of 'now'
            now = timestamp.mtime_of(util.fstat(st))

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    # trust our estimate that the end is near now
                    now = timestamp.timestamp((end, 0))
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = timestamp.zero()
        self._dirty = False
854 856
855 857 def _dirignore(self, f):
856 858 if self._ignore(f):
857 859 return True
858 860 for p in pathutil.finddirs(f):
859 861 if self._ignore(p):
860 862 return True
861 863 return False
862 864
863 865 def _ignorefiles(self):
864 866 files = []
865 867 if os.path.exists(self._join(b'.hgignore')):
866 868 files.append(self._join(b'.hgignore'))
867 869 for name, path in self._ui.configitems(b"ui"):
868 870 if name == b'ignore' or name.startswith(b'ignore.'):
869 871 # we need to use os.path.join here rather than self._join
870 872 # because path is arbitrary and user-specified
871 873 files.append(os.path.join(self._rootdir, util.expandpath(path)))
872 874 return files
873 875
    def _ignorefileandline(self, f):
        """Return (file, lineno, line) of the first ignore pattern matching f.

        Subinclude files are followed breadth-first; returns (None, -1, b"")
        when no pattern matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # Queue the referenced file instead of matching it,
                    # unless it was already processed.
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
895 897
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # Human-readable message for a path that is not a file/dir/link.
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # Bind hot lookups to locals for the walk loop below.
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # Drop any requested files that live inside a subrepo; both lists
        # are sorted so a single merge-style pass suffices.
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1030 1032
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Decide how much may be pruned: listing ignored files means nothing
        # can be skipped; listing unknown files means only ignored entries
        # can be skipped; otherwise directory recursion (step 2) is dropped.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # bind hot attributes/functions to locals for the traversal loop
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        # drop explicitly-requested directories that are ignored
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # iterative depth-first traversal; `work` is a stack of
            # directories still to be listed
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    # never descend into the repository's own .hg directory
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # on-disk entry of some other type (not a dir,
                            # regular file or symlink); if the name is tracked,
                            # report it without stat data
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # remove the sentinel entries seeded by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1218 1220
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status through the Rust extension.

        Returns a (lookup, scmutil.status) pair, like ``status()``.
        The Rust side may raise ``rustmod.FallbackError``, in which case
        the caller falls back to the pure-Python implementation.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust call reports whether it mutated the dirstate map;
        # remember that so the changes get written out later
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        # surface ignore-pattern problems reported by the Rust side:
        # tuples are (file, bad syntax); bare items are unreadable files
        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        # forward per-file errors to the matcher's bad-file callback
        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1297 1299
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # Save the caller's listing flags; the parameter names are reused
        # below as the result accumulator lists.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # Prefer the Rust fast path unless one of the unsupported
        # configurations below applies.
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # Rust declined this request; fall through to pure Python
                pass

        def noop(f):
            # stand-in for appenders of result lists the caller didn't ask for
            pass

        # bind bound methods to locals to speed up the walk loop below
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # fn is on disk but not in the dirstate: unknown or ignored
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            t = dget(fn)
            mode = t.mode
            size = t.size

            # st is None when walk() found no usable stat for fn
            if not st and t.tracked:
                dadd(fn)
            elif t.p2_info:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                # tracked and present on disk: compare size/mode, then mtime
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                    ladd(fn)
                elif timestamp.mtime_of(st) == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1426 1428
1427 1429 def matches(self, match):
1428 1430 """
1429 1431 return files in the dirstate (in whatever state) filtered by match
1430 1432 """
1431 1433 dmap = self._map
1432 1434 if rustmod is not None:
1433 1435 dmap = self._map._map
1434 1436
1435 1437 if match.always():
1436 1438 return dmap.keys()
1437 1439 files = match.files()
1438 1440 if match.isexact():
1439 1441 # fast path -- filter the other way around, since typically files is
1440 1442 # much smaller than dmap
1441 1443 return [f for f in files if f in dmap]
1442 1444 if match.prefix() and all(fn in dmap for fn in files):
1443 1445 # fast path -- all the values are known to be files, so just return
1444 1446 # that
1445 1447 return list(files)
1446 1448 return [f for f in dmap if match(f)]
1447 1449
1448 1450 def _actualfilename(self, tr):
1449 1451 if tr:
1450 1452 return self._pendingfilename
1451 1453 else:
1452 1454 return self._filename
1453 1455
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        tr: the currently-running transaction, or None
        backupname: name of the backup file, relative to self._opener;
        must differ from the live dirstate file name
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # drop any stale backup before creating the new one
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1492 1494
1493 1495 def restorebackup(self, tr, backupname):
1494 1496 '''Restore dirstate by backup file'''
1495 1497 # this "invalidate()" prevents "wlock.release()" from writing
1496 1498 # changes of dirstate out after restoring from backup file
1497 1499 self.invalidate()
1498 1500 filename = self._actualfilename(tr)
1499 1501 o = self._opener
1500 1502 if util.samefile(o.join(backupname), o.join(filename)):
1501 1503 o.unlink(backupname)
1502 1504 else:
1503 1505 o.rename(backupname, filename, checkambig=True)
1504 1506
    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        # `tr` is unused; it is accepted only for interface symmetry with
        # savebackup() and restorebackup().
        self._opener.unlink(backupname)
1508 1510
1509 1511 def verify(self, m1, m2):
1510 1512 """check the dirstate content again the parent manifest and yield errors"""
1511 1513 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1512 1514 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1513 1515 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1514 1516 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1515 1517 for f, entry in self.items():
1516 1518 state = entry.state
1517 1519 if state in b"nr" and f not in m1:
1518 1520 yield (missing_from_p1, f, state)
1519 1521 if state in b"a" and f in m1:
1520 1522 yield (unexpected_in_p1, f, state)
1521 1523 if state in b"m" and f not in m1 and f not in m2:
1522 1524 yield (missing_from_ps, f, state)
1523 1525 for f in m1:
1524 1526 state = self.get_entry(f).state
1525 1527 if state not in b"nrm":
1526 1528 yield (missing_from_ds, f, state)
General Comments 0
You need to be logged in to leave comments. Login now