##// END OF EJS Templates
dirstate: move "get fs now" in the timestamp utility module...
marmoute -
r49202:08b060ab default
parent child Browse files
Show More
@@ -1,1534 +1,1524 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .dirstateutils import (
35 35 timestamp,
36 36 )
37 37
38 38 from .interfaces import (
39 39 dirstate as intdirstate,
40 40 util as interfaceutil,
41 41 )
42 42
43 43 parsers = policy.importmod('parsers')
44 44 rustmod = policy.importrust('dirstate')
45 45
46 46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47 47
48 48 propertycache = util.propertycache
49 49 filecache = scmutil.filecache
50 50 _rangemask = dirstatemap.rangemask
51 51
52 52 DirstateItem = dirstatemap.DirstateItem
53 53
54 54
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname through the owner's '.hg' opener (obj is the
        # dirstate/repo-like object holding `_opener`)
        return obj._opener.join(fname)
60 60
61 61
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the repository root via dirstate._join
        return obj._join(fname)
67 67
68 68
def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem

    Probes the filesystem clock by creating a temporary file through *vfs*
    and reading its mtime, so the result has the same granularity the
    dirstate will later observe for real files.  (This revision moves this
    helper into the timestamp utility module as timestamp.get_fs_now.)
    '''
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return timestamp.mtime_of(os.fstat(tmpfd))
    finally:
        # always clean up the probe file, even if fstat fails
        os.close(tmpfd)
        vfs.unlink(tmpname)
77
78
def requires_parents_change(func):
    """Decorator enforcing that *func* only runs inside a parentchange context.

    Raises error.ProgrammingError when the dirstate is not currently in the
    middle of a parent change.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return wrap
88 78
89 79
def requires_no_parents_change(func):
    """Decorator enforcing that *func* only runs outside a parentchange context.

    Raises error.ProgrammingError when the dirstate is currently in the
    middle of a parent change.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context'
        msg %= func.__name__
        raise error.ProgrammingError(msg)

    return wrap
99 89
100 90
101 91 @interfaceutil.implementer(intdirstate.idirstate)
102 92 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.

        validate is applied to parent nodes before they are returned
        (see parents()/p1()/p2()); sparsematchfn lazily produces the
        sparse matcher; nodeconstants supplies nullid; use_dirstate_v2
        selects the on-disk format handled by the dirstatemap.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = timestamp.zero()
        self._ui = ui
        self._filecache = {}
        # number of currently-open parentchange contexts (see parentchange())
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        # original parents, remembered so parent-change callbacks can fire
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
142 132
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # merely touching self._pl forces the dirstate map to load the parents
        self._pl
149 139
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
166 156
167 157 def pendingparentchange(self):
168 158 """Returns true if the dirstate is in the middle of a set of changes
169 159 that modify the dirstate parent.
170 160 """
171 161 return self._parentwriters > 0
172 162
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # NOTE(review): the explicit assignment mirrors what @propertycache
        # does on return; presumably defensive — confirm before removing.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
184 174
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
197 187
198 188 @repocache(b'branch')
199 189 def _branch(self):
200 190 try:
201 191 return self._opener.read(b"branch").strip() or b"default"
202 192 except IOError as inst:
203 193 if inst.errno != errno.ENOENT:
204 194 raise
205 195 return b"default"
206 196
    @property
    def _pl(self):
        # raw (unvalidated) parent pair straight from the dirstate map
        return self._map.parents()
210 200
    def hasdir(self, d):
        """Return whether directory *d* contains any tracked file."""
        return self._map.hastrackeddir(d)
213 203
214 204 @rootcache(b'.hgignore')
215 205 def _ignore(self):
216 206 files = self._ignorefiles()
217 207 if not files:
218 208 return matchmod.never()
219 209
220 210 pats = [b'include:%s' % f for f in files]
221 211 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
222 212
223 213 @propertycache
224 214 def _slash(self):
225 215 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
226 216
    @propertycache
    def _checklink(self):
        # whether the filesystem under the repo root supports symlinks
        return util.checklink(self._root)
230 220
    @propertycache
    def _checkexec(self):
        # whether the filesystem under the repo root honors the exec bit
        return bool(util.checkexec(self._root))
234 224
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (checked against '.hg')
        return not util.fscasesensitive(self._join(b'.hg'))
238 228
    def _join(self, f):
        """Return the absolute path of repo-relative path *f*."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
243 233
    def flagfunc(self, buildfallback):
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.
        """

        # small hack to cache the result of buildfallback()
        fallback_func = []

        def get_flags(x):
            # returns b'l', b'x', b'lx'-style flags never combined here:
            # symlink wins over exec, missing file yields b''
            entry = None
            fallback_value = None
            try:
                st = os.lstat(self._join(x))
            except OSError:
                # file is gone from the working copy: no flags
                return b''

            if self._checklink:
                # layer 1: trust the filesystem
                if util.statislink(st):
                    return b'l'
            else:
                # layer 2: fallback data recorded in the dirstate entry
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                else:
                    # layer 3: infer from the parents (built lazily, once)
                    if not fallback_func:
                        fallback_func.append(buildfallback())
                    fallback_value = fallback_func[0](x)
                    if b'l' in fallback_value:
                        return b'l'

            if self._checkexec:
                if util.statisexec(st):
                    return b'x'
            else:
                if entry is None:
                    entry = self.get_entry(x)
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                else:
                    if fallback_value is None:
                        if not fallback_func:
                            fallback_func.append(buildfallback())
                        fallback_value = fallback_func[0](x)
                    if b'x' in fallback_value:
                        return b'x'
            return b''

        return get_flags
298 288
299 289 @propertycache
300 290 def _cwd(self):
301 291 # internal config: ui.forcecwd
302 292 forcecwd = self._ui.config(b'ui', b'forcecwd')
303 293 if forcecwd:
304 294 return forcecwd
305 295 return encoding.getcwd()
306 296
307 297 def getcwd(self):
308 298 """Return the path from which a canonical path is calculated.
309 299
310 300 This path should be used to resolve file patterns or to convert
311 301 canonical paths back to file paths for display. It shouldn't be
312 302 used to get real file paths. Use vfs functions instead.
313 303 """
314 304 cwd = self._cwd
315 305 if cwd == self._root:
316 306 return b''
317 307 # self._root ends with a path separator if self._root is '/' or 'C:\'
318 308 rootsep = self._root
319 309 if not util.endswithsep(rootsep):
320 310 rootsep += pycompat.ossep
321 311 if cwd.startswith(rootsep):
322 312 return cwd[len(rootsep) :]
323 313 else:
324 314 # we're outside the repo. return an absolute path.
325 315 return cwd
326 316
327 317 def pathto(self, f, cwd=None):
328 318 if cwd is None:
329 319 cwd = self.getcwd()
330 320 path = util.pathto(self._root, cwd, f)
331 321 if self._slash:
332 322 return util.pconvert(path)
333 323 return path
334 324
    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition
        ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. we should
        consider migrating all user of this to going through the dirstate entry
        instead.
        """
        # deprecated since 6.1 in favor of get_entry(); warn every caller
        msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
        util.nouideprecwarn(msg, b'6.1', stacklevel=2)
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        # unknown files report b'?' rather than raising KeyError
        return b'?'
355 345
356 346 def get_entry(self, path):
357 347 """return a DirstateItem for the associated path"""
358 348 entry = self._map.get(path)
359 349 if entry is None:
360 350 return DirstateItem()
361 351 return entry
362 352
    def __contains__(self, key):
        # membership is delegated to the dirstate map
        return key in self._map
365 355
    def __iter__(self):
        # iterate filenames in sorted order for deterministic output
        return iter(sorted(self._map))
368 358
    def items(self):
        """Iterate over (filename, DirstateItem) pairs."""
        return pycompat.iteritems(self._map)

    # legacy Python 2 style alias
    iteritems = items
373 363
374 364 def parents(self):
375 365 return [self._validate(p) for p in self._pl]
376 366
    def p1(self):
        """Return the validated first working directory parent."""
        return self._validate(self._pl[0])
379 369
    def p2(self):
        """Return the validated second working directory parent."""
        return self._validate(self._pl[1])
382 372
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        # merging iff the second parent is set (non-null)
        return self._pl[1] != self._nodeconstants.nullid
387 377
    def branch(self):
        """Return the current branch name in local encoding."""
        return encoding.tolocal(self._branch)
390 380
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        # enforce use of the parentchange() context manager
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for change callbacks
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
416 406
    def setbranch(self, branch):
        """Persist *branch* (local encoding) to .hg/branch atomically."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # discard the partially-written temp file on any failure
            f.discard()
            raise
432 422
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so they are recomputed on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = timestamp.zero()
        self._dirty = False
        self._parentwriters = 0
        self._origpl = None
447 437
448 438 def copy(self, source, dest):
449 439 """Mark dest as a copy of source. Unmark dest if source is None."""
450 440 if source == dest:
451 441 return
452 442 self._dirty = True
453 443 if source is not None:
454 444 self._map.copymap[dest] = source
455 445 else:
456 446 self._map.copymap.pop(dest, None)
457 447
458 448 def copied(self, file):
459 449 return self._map.copymap.get(file, None)
460 450
    def copies(self):
        """Return the full dest -> source copy map."""
        return self._map.copymap
463 453
464 454 @requires_no_parents_change
465 455 def set_tracked(self, filename):
466 456 """a "public" method for generic code to mark a file as tracked
467 457
468 458 This function is to be called outside of "update/merge" case. For
469 459 example by a command like `hg add X`.
470 460
471 461 return True the file was previously untracked, False otherwise.
472 462 """
473 463 self._dirty = True
474 464 entry = self._map.get(filename)
475 465 if entry is None or not entry.tracked:
476 466 self._check_new_tracked_filename(filename)
477 467 return self._map.set_tracked(filename)
478 468
479 469 @requires_no_parents_change
480 470 def set_untracked(self, filename):
481 471 """a "public" method for generic code to mark a file as untracked
482 472
483 473 This function is to be called outside of "update/merge" case. For
484 474 example by a command like `hg remove X`.
485 475
486 476 return True the file was previously tracked, False otherwise.
487 477 """
488 478 ret = self._map.set_untracked(filename)
489 479 if ret:
490 480 self._dirty = True
491 481 return ret
492 482
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        if parentfiledata:
            # caller already stat()ed the file; trust its (mode, size, mtime)
            (mode, size, mtime) = parentfiledata
        else:
            (mode, size, mtime) = self._get_filedata(filename)
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        self._map.set_clean(filename, mode, size, mtime)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
509 499
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._map.set_possibly_dirty(filename)
515 505
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        # only stat() the file when it is clean in both places
        parentfiledata = None
        if wc_tracked and p1_tracked:
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
568 558
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the direstates parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True

        # stat data is only meaningful when the file is clean everywhere
        need_parent_file_data = (
            not possibly_dirty and not p2_info and wc_tracked and p1_tracked
        )

        if need_parent_file_data and parentfiledata is None:
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
621 611
    def _check_new_tracked_filename(self, filename):
        """Abort if *filename* is invalid or clashes with tracked content.

        Raises error.Abort when a tracked directory has the same name, or
        when a tracked (non-removed) file shadows one of its parent
        directories.
        """
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                # a deeper tracked directory already implies d is a directory
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
637 627
638 628 def _get_filedata(self, filename):
639 629 """returns"""
640 630 s = os.lstat(self._join(filename))
641 631 mode = s.st_mode
642 632 size = s.st_size
643 633 mtime = timestamp.mtime_of(s)
644 634 return (mode, size, mtime)
645 635
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Resolve the on-disk case of *path* and cache it in *storemap*.

        *normed* is the case-normalized form of *path*; *exists* may be
        passed to skip the lexists() check; *storemap* is the fold map
        (file or dir) that the discovered spelling is recorded in.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded

        return folded
671 661
672 662 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
673 663 normed = util.normcase(path)
674 664 folded = self._map.filefoldmap.get(normed, None)
675 665 if folded is None:
676 666 if isknown:
677 667 folded = path
678 668 else:
679 669 folded = self._discoverpath(
680 670 path, normed, ignoremissing, exists, self._map.filefoldmap
681 671 )
682 672 return folded
683 673
684 674 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
685 675 normed = util.normcase(path)
686 676 folded = self._map.filefoldmap.get(normed, None)
687 677 if folded is None:
688 678 folded = self._map.dirfoldmap.get(normed, None)
689 679 if folded is None:
690 680 if isknown:
691 681 folded = path
692 682 else:
693 683 # store discovered result in dirfoldmap so that future
694 684 # normalizefile calls don't start matching directories
695 685 folded = self._discoverpath(
696 686 path, normed, ignoremissing, exists, self._map.dirfoldmap
697 687 )
698 688 return folded
699 689
700 690 def normalize(self, path, isknown=False, ignoremissing=False):
701 691 """
702 692 normalize the case of a pathname when on a casefolding filesystem
703 693
704 694 isknown specifies whether the filename came from walking the
705 695 disk, to avoid extra filesystem access.
706 696
707 697 If ignoremissing is True, missing path are returned
708 698 unchanged. Otherwise, we try harder to normalize possibly
709 699 existing path components.
710 700
711 701 The normalized case is determined based on the following precedence:
712 702
713 703 - version of name already stored in the dirstate
714 704 - version of name stored on disk
715 705 - version provided via command arguments
716 706 """
717 707
718 708 if self._checkcase:
719 709 return self._normalize(path, isknown, ignoremissing)
720 710 return path
721 711
    def clear(self):
        """Drop all tracked state and mark the dirstate dirty."""
        self._map.clear()
        self._lastnormaltime = timestamp.zero()
        self._dirty = True
726 716
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Rebuild dirstate entries against *parent*.

        With changedfiles=None the whole dirstate is rebuilt from
        *allfiles*; otherwise only the named files are re-checked
        (looked up if present in allfiles, dropped if not).
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the rebuild
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:

            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
768 758
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
776 766
    def write(self, tr):
        """Write the dirstate to disk (possibly delayed through *tr*).

        A no-op when nothing is dirty.  With a transaction, the actual
        write is registered as a file generator and happens later.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # record when mtime start to be ambiguous
            now = timestamp.get_fs_now(self._opener)

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f, now=now),
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
803 793
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
814 804
    def _writedirstate(self, tr, st, now=None):
        """Serialize the dirstate map to the open file *st*.

        *now* is the filesystem's notion of the current time, used to
        detect ambiguous mtimes; derived from *st* itself when omitted.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            # sorted for a deterministic callback order
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None

        if now is None:
            # use the modification time of the newly created temporary file as the
            # filesystem's notion of 'now'
            now = timestamp.mtime_of(util.fstat(st))

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    # trust our estimate that the end is near now
                    now = timestamp.timestamp((end, 0))
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = timestamp.zero()
        self._dirty = False
851 841
852 842 def _dirignore(self, f):
853 843 if self._ignore(f):
854 844 return True
855 845 for p in pathutil.finddirs(f):
856 846 if self._ignore(p):
857 847 return True
858 848 return False
859 849
860 850 def _ignorefiles(self):
861 851 files = []
862 852 if os.path.exists(self._join(b'.hgignore')):
863 853 files.append(self._join(b'.hgignore'))
864 854 for name, path in self._ui.configitems(b"ui"):
865 855 if name == b'ignore' or name.startswith(b'ignore.'):
866 856 # we need to use os.path.join here rather than self._join
867 857 # because path is arbitrary and user-specified
868 858 files.append(os.path.join(self._rootdir, util.expandpath(path)))
869 859 return files
870 860
    def _ignorefileandline(self, f):
        """Return (ignorefile, lineno, line) for the pattern matching *f*.

        Walks all ignore files (following subinclude directives, each file
        visited at most once) and returns (None, -1, b"") when no pattern
        matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the included file instead of matching directly
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
892 882
893 883 def _walkexplicit(self, match, subrepos):
894 884 """Get stat data about the files explicitly specified by match.
895 885
896 886 Return a triple (results, dirsfound, dirsnotfound).
897 887 - results is a mapping from filename to stat result. It also contains
898 888 listings mapping subrepos and .hg to None.
899 889 - dirsfound is a list of files found to be directories.
900 890 - dirsnotfound is a list of files that the dirstate thinks are
901 891 directories and that were not found."""
902 892
903 893 def badtype(mode):
904 894 kind = _(b'unknown')
905 895 if stat.S_ISCHR(mode):
906 896 kind = _(b'character device')
907 897 elif stat.S_ISBLK(mode):
908 898 kind = _(b'block device')
909 899 elif stat.S_ISFIFO(mode):
910 900 kind = _(b'fifo')
911 901 elif stat.S_ISSOCK(mode):
912 902 kind = _(b'socket')
913 903 elif stat.S_ISDIR(mode):
914 904 kind = _(b'directory')
915 905 return _(b'unsupported file type (type is %s)') % kind
916 906
917 907 badfn = match.bad
918 908 dmap = self._map
919 909 lstat = os.lstat
920 910 getkind = stat.S_IFMT
921 911 dirkind = stat.S_IFDIR
922 912 regkind = stat.S_IFREG
923 913 lnkkind = stat.S_IFLNK
924 914 join = self._join
925 915 dirsfound = []
926 916 foundadd = dirsfound.append
927 917 dirsnotfound = []
928 918 notfoundadd = dirsnotfound.append
929 919
930 920 if not match.isexact() and self._checkcase:
931 921 normalize = self._normalize
932 922 else:
933 923 normalize = None
934 924
935 925 files = sorted(match.files())
936 926 subrepos.sort()
937 927 i, j = 0, 0
938 928 while i < len(files) and j < len(subrepos):
939 929 subpath = subrepos[j] + b"/"
940 930 if files[i] < subpath:
941 931 i += 1
942 932 continue
943 933 while i < len(files) and files[i].startswith(subpath):
944 934 del files[i]
945 935 j += 1
946 936
947 937 if not files or b'' in files:
948 938 files = [b'']
949 939 # constructing the foldmap is expensive, so don't do it for the
950 940 # common case where files is ['']
951 941 normalize = None
952 942 results = dict.fromkeys(subrepos)
953 943 results[b'.hg'] = None
954 944
955 945 for ff in files:
956 946 if normalize:
957 947 nf = normalize(ff, False, True)
958 948 else:
959 949 nf = ff
960 950 if nf in results:
961 951 continue
962 952
963 953 try:
964 954 st = lstat(join(nf))
965 955 kind = getkind(st.st_mode)
966 956 if kind == dirkind:
967 957 if nf in dmap:
968 958 # file replaced by dir on disk but still in dirstate
969 959 results[nf] = None
970 960 foundadd((nf, ff))
971 961 elif kind == regkind or kind == lnkkind:
972 962 results[nf] = st
973 963 else:
974 964 badfn(ff, badtype(kind))
975 965 if nf in dmap:
976 966 results[nf] = None
977 967 except OSError as inst: # nf not found on disk - it is dirstate only
978 968 if nf in dmap: # does it exactly match a missing file?
979 969 results[nf] = None
980 970 else: # does it match a missing directory?
981 971 if self._map.hasdir(nf):
982 972 notfoundadd(nf)
983 973 else:
984 974 badfn(ff, encoding.strtolocal(inst.strerror))
985 975
986 976 # match.files() may contain explicitly-specified paths that shouldn't
987 977 # be taken; drop them from the list of files found. dirsfound/notfound
988 978 # aren't filtered here because they will be tested later.
989 979 if match.anypats():
990 980 for f in list(results):
991 981 if f == b'.hg' or f in subrepos:
992 982 # keep sentinel to disable further out-of-repo walks
993 983 continue
994 984 if not match(f):
995 985 del results[f]
996 986
997 987 # Case insensitive filesystems cannot rely on lstat() failing to detect
998 988 # a case-only rename. Prune the stat object for any file that does not
999 989 # match the case in the filesystem, if there are multiple files that
1000 990 # normalize to the same path.
1001 991 if match.isexact() and self._checkcase:
1002 992 normed = {}
1003 993
1004 994 for f, st in pycompat.iteritems(results):
1005 995 if st is None:
1006 996 continue
1007 997
1008 998 nc = util.normcase(f)
1009 999 paths = normed.get(nc)
1010 1000
1011 1001 if paths is None:
1012 1002 paths = set()
1013 1003 normed[nc] = paths
1014 1004
1015 1005 paths.add(f)
1016 1006
1017 1007 for norm, paths in pycompat.iteritems(normed):
1018 1008 if len(paths) > 1:
1019 1009 for path in paths:
1020 1010 folded = self._discoverpath(
1021 1011 path, norm, True, None, self._map.dirfoldmap
1022 1012 )
1023 1013 if path != folded:
1024 1014 results[path] = None
1025 1015
1026 1016 return results, dirsfound, dirsnotfound
1027 1017
1028 1018 def walk(self, match, subrepos, unknown, ignored, full=True):
1029 1019 """
1030 1020 Walk recursively through the directory tree, finding all files
1031 1021 matched by match.
1032 1022
1033 1023 If full is False, maybe skip some known-clean files.
1034 1024
1035 1025 Return a dict mapping filename to stat-like object (either
1036 1026 mercurial.osutil.stat instance or return value of os.stat()).
1037 1027
1038 1028 """
1039 1029 # full is a flag that extensions that hook into walk can use -- this
1040 1030 # implementation doesn't use it at all. This satisfies the contract
1041 1031 # because we only guarantee a "maybe".
1042 1032
1043 1033 if ignored:
1044 1034 ignore = util.never
1045 1035 dirignore = util.never
1046 1036 elif unknown:
1047 1037 ignore = self._ignore
1048 1038 dirignore = self._dirignore
1049 1039 else:
1050 1040 # if not unknown and not ignored, drop dir recursion and step 2
1051 1041 ignore = util.always
1052 1042 dirignore = util.always
1053 1043
1054 1044 matchfn = match.matchfn
1055 1045 matchalways = match.always()
1056 1046 matchtdir = match.traversedir
1057 1047 dmap = self._map
1058 1048 listdir = util.listdir
1059 1049 lstat = os.lstat
1060 1050 dirkind = stat.S_IFDIR
1061 1051 regkind = stat.S_IFREG
1062 1052 lnkkind = stat.S_IFLNK
1063 1053 join = self._join
1064 1054
1065 1055 exact = skipstep3 = False
1066 1056 if match.isexact(): # match.exact
1067 1057 exact = True
1068 1058 dirignore = util.always # skip step 2
1069 1059 elif match.prefix(): # match.match, no patterns
1070 1060 skipstep3 = True
1071 1061
1072 1062 if not exact and self._checkcase:
1073 1063 normalize = self._normalize
1074 1064 normalizefile = self._normalizefile
1075 1065 skipstep3 = False
1076 1066 else:
1077 1067 normalize = self._normalize
1078 1068 normalizefile = None
1079 1069
1080 1070 # step 1: find all explicit files
1081 1071 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1082 1072 if matchtdir:
1083 1073 for d in work:
1084 1074 matchtdir(d[0])
1085 1075 for d in dirsnotfound:
1086 1076 matchtdir(d)
1087 1077
1088 1078 skipstep3 = skipstep3 and not (work or dirsnotfound)
1089 1079 work = [d for d in work if not dirignore(d[0])]
1090 1080
1091 1081 # step 2: visit subdirectories
1092 1082 def traverse(work, alreadynormed):
1093 1083 wadd = work.append
1094 1084 while work:
1095 1085 tracing.counter('dirstate.walk work', len(work))
1096 1086 nd = work.pop()
1097 1087 visitentries = match.visitchildrenset(nd)
1098 1088 if not visitentries:
1099 1089 continue
1100 1090 if visitentries == b'this' or visitentries == b'all':
1101 1091 visitentries = None
1102 1092 skip = None
1103 1093 if nd != b'':
1104 1094 skip = b'.hg'
1105 1095 try:
1106 1096 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1107 1097 entries = listdir(join(nd), stat=True, skip=skip)
1108 1098 except OSError as inst:
1109 1099 if inst.errno in (errno.EACCES, errno.ENOENT):
1110 1100 match.bad(
1111 1101 self.pathto(nd), encoding.strtolocal(inst.strerror)
1112 1102 )
1113 1103 continue
1114 1104 raise
1115 1105 for f, kind, st in entries:
1116 1106 # Some matchers may return files in the visitentries set,
1117 1107 # instead of 'this', if the matcher explicitly mentions them
1118 1108 # and is not an exactmatcher. This is acceptable; we do not
1119 1109 # make any hard assumptions about file-or-directory below
1120 1110 # based on the presence of `f` in visitentries. If
1121 1111 # visitchildrenset returned a set, we can always skip the
1122 1112 # entries *not* in the set it provided regardless of whether
1123 1113 # they're actually a file or a directory.
1124 1114 if visitentries and f not in visitentries:
1125 1115 continue
1126 1116 if normalizefile:
1127 1117 # even though f might be a directory, we're only
1128 1118 # interested in comparing it to files currently in the
1129 1119 # dmap -- therefore normalizefile is enough
1130 1120 nf = normalizefile(
1131 1121 nd and (nd + b"/" + f) or f, True, True
1132 1122 )
1133 1123 else:
1134 1124 nf = nd and (nd + b"/" + f) or f
1135 1125 if nf not in results:
1136 1126 if kind == dirkind:
1137 1127 if not ignore(nf):
1138 1128 if matchtdir:
1139 1129 matchtdir(nf)
1140 1130 wadd(nf)
1141 1131 if nf in dmap and (matchalways or matchfn(nf)):
1142 1132 results[nf] = None
1143 1133 elif kind == regkind or kind == lnkkind:
1144 1134 if nf in dmap:
1145 1135 if matchalways or matchfn(nf):
1146 1136 results[nf] = st
1147 1137 elif (matchalways or matchfn(nf)) and not ignore(
1148 1138 nf
1149 1139 ):
1150 1140 # unknown file -- normalize if necessary
1151 1141 if not alreadynormed:
1152 1142 nf = normalize(nf, False, True)
1153 1143 results[nf] = st
1154 1144 elif nf in dmap and (matchalways or matchfn(nf)):
1155 1145 results[nf] = None
1156 1146
1157 1147 for nd, d in work:
1158 1148 # alreadynormed means that processwork doesn't have to do any
1159 1149 # expensive directory normalization
1160 1150 alreadynormed = not normalize or nd == d
1161 1151 traverse([d], alreadynormed)
1162 1152
1163 1153 for s in subrepos:
1164 1154 del results[s]
1165 1155 del results[b'.hg']
1166 1156
1167 1157 # step 3: visit remaining files from dmap
1168 1158 if not skipstep3 and not exact:
1169 1159 # If a dmap file is not in results yet, it was either
1170 1160 # a) not matching matchfn b) ignored, c) missing, or d) under a
1171 1161 # symlink directory.
1172 1162 if not results and matchalways:
1173 1163 visit = [f for f in dmap]
1174 1164 else:
1175 1165 visit = [f for f in dmap if f not in results and matchfn(f)]
1176 1166 visit.sort()
1177 1167
1178 1168 if unknown:
1179 1169 # unknown == True means we walked all dirs under the roots
 1180 1170             # that weren't ignored, and everything that matched was stat'ed
1181 1171 # and is already in results.
1182 1172 # The rest must thus be ignored or under a symlink.
1183 1173 audit_path = pathutil.pathauditor(self._root, cached=True)
1184 1174
1185 1175 for nf in iter(visit):
1186 1176 # If a stat for the same file was already added with a
1187 1177 # different case, don't add one for this, since that would
1188 1178 # make it appear as if the file exists under both names
1189 1179 # on disk.
1190 1180 if (
1191 1181 normalizefile
1192 1182 and normalizefile(nf, True, True) in results
1193 1183 ):
1194 1184 results[nf] = None
1195 1185 # Report ignored items in the dmap as long as they are not
1196 1186 # under a symlink directory.
1197 1187 elif audit_path.check(nf):
1198 1188 try:
1199 1189 results[nf] = lstat(join(nf))
1200 1190 # file was just ignored, no links, and exists
1201 1191 except OSError:
1202 1192 # file doesn't exist
1203 1193 results[nf] = None
1204 1194 else:
1205 1195 # It's either missing or under a symlink directory
1206 1196 # which we in this case report as missing
1207 1197 results[nf] = None
1208 1198 else:
1209 1199 # We may not have walked the full directory tree above,
1210 1200 # so stat and check everything we missed.
1211 1201 iv = iter(visit)
1212 1202 for st in util.statfiles([join(i) for i in visit]):
1213 1203 results[next(iv)] = st
1214 1204 return results
1215 1205
1216 1206 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1217 1207 # Force Rayon (Rust parallelism library) to respect the number of
1218 1208 # workers. This is a temporary workaround until Rust code knows
1219 1209 # how to read the config file.
1220 1210 numcpus = self._ui.configint(b"worker", b"numcpus")
1221 1211 if numcpus is not None:
1222 1212 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1223 1213
1224 1214 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1225 1215 if not workers_enabled:
1226 1216 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1227 1217
1228 1218 (
1229 1219 lookup,
1230 1220 modified,
1231 1221 added,
1232 1222 removed,
1233 1223 deleted,
1234 1224 clean,
1235 1225 ignored,
1236 1226 unknown,
1237 1227 warnings,
1238 1228 bad,
1239 1229 traversed,
1240 1230 dirty,
1241 1231 ) = rustmod.status(
1242 1232 self._map._map,
1243 1233 matcher,
1244 1234 self._rootdir,
1245 1235 self._ignorefiles(),
1246 1236 self._checkexec,
1247 1237 self._lastnormaltime,
1248 1238 bool(list_clean),
1249 1239 bool(list_ignored),
1250 1240 bool(list_unknown),
1251 1241 bool(matcher.traversedir),
1252 1242 )
1253 1243
1254 1244 self._dirty |= dirty
1255 1245
1256 1246 if matcher.traversedir:
1257 1247 for dir in traversed:
1258 1248 matcher.traversedir(dir)
1259 1249
1260 1250 if self._ui.warn:
1261 1251 for item in warnings:
1262 1252 if isinstance(item, tuple):
1263 1253 file_path, syntax = item
1264 1254 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1265 1255 file_path,
1266 1256 syntax,
1267 1257 )
1268 1258 self._ui.warn(msg)
1269 1259 else:
1270 1260 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1271 1261 self._ui.warn(
1272 1262 msg
1273 1263 % (
1274 1264 pathutil.canonpath(
1275 1265 self._rootdir, self._rootdir, item
1276 1266 ),
1277 1267 b"No such file or directory",
1278 1268 )
1279 1269 )
1280 1270
1281 1271 for (fn, message) in bad:
1282 1272 matcher.bad(fn, encoding.strtolocal(message))
1283 1273
1284 1274 status = scmutil.status(
1285 1275 modified=modified,
1286 1276 added=added,
1287 1277 removed=removed,
1288 1278 deleted=deleted,
1289 1279 unknown=unknown,
1290 1280 ignored=ignored,
1291 1281 clean=clean,
1292 1282 )
1293 1283 return (lookup, status)
1294 1284
1295 1285 def status(self, match, subrepos, ignored, clean, unknown):
1296 1286 """Determine the status of the working copy relative to the
1297 1287 dirstate and return a pair of (unsure, status), where status is of type
1298 1288 scmutil.status and:
1299 1289
1300 1290 unsure:
1301 1291 files that might have been modified since the dirstate was
1302 1292 written, but need to be read to be sure (size is the same
1303 1293 but mtime differs)
1304 1294 status.modified:
1305 1295 files that have definitely been modified since the dirstate
1306 1296 was written (different size or mode)
1307 1297 status.clean:
1308 1298 files that have definitely not been modified since the
1309 1299 dirstate was written
1310 1300 """
1311 1301 listignored, listclean, listunknown = ignored, clean, unknown
1312 1302 lookup, modified, added, unknown, ignored = [], [], [], [], []
1313 1303 removed, deleted, clean = [], [], []
1314 1304
1315 1305 dmap = self._map
1316 1306 dmap.preload()
1317 1307
1318 1308 use_rust = True
1319 1309
1320 1310 allowed_matchers = (
1321 1311 matchmod.alwaysmatcher,
1322 1312 matchmod.exactmatcher,
1323 1313 matchmod.includematcher,
1324 1314 )
1325 1315
1326 1316 if rustmod is None:
1327 1317 use_rust = False
1328 1318 elif self._checkcase:
1329 1319 # Case-insensitive filesystems are not handled yet
1330 1320 use_rust = False
1331 1321 elif subrepos:
1332 1322 use_rust = False
1333 1323 elif sparse.enabled:
1334 1324 use_rust = False
1335 1325 elif not isinstance(match, allowed_matchers):
1336 1326 # Some matchers have yet to be implemented
1337 1327 use_rust = False
1338 1328
1339 1329 if use_rust:
1340 1330 try:
1341 1331 return self._rust_status(
1342 1332 match, listclean, listignored, listunknown
1343 1333 )
1344 1334 except rustmod.FallbackError:
1345 1335 pass
1346 1336
1347 1337 def noop(f):
1348 1338 pass
1349 1339
1350 1340 dcontains = dmap.__contains__
1351 1341 dget = dmap.__getitem__
1352 1342 ladd = lookup.append # aka "unsure"
1353 1343 madd = modified.append
1354 1344 aadd = added.append
1355 1345 uadd = unknown.append if listunknown else noop
1356 1346 iadd = ignored.append if listignored else noop
1357 1347 radd = removed.append
1358 1348 dadd = deleted.append
1359 1349 cadd = clean.append if listclean else noop
1360 1350 mexact = match.exact
1361 1351 dirignore = self._dirignore
1362 1352 checkexec = self._checkexec
1363 1353 checklink = self._checklink
1364 1354 copymap = self._map.copymap
1365 1355 lastnormaltime = self._lastnormaltime
1366 1356
1367 1357 # We need to do full walks when either
1368 1358 # - we're listing all clean files, or
1369 1359 # - match.traversedir does something, because match.traversedir should
1370 1360 # be called for every dir in the working dir
1371 1361 full = listclean or match.traversedir is not None
1372 1362 for fn, st in pycompat.iteritems(
1373 1363 self.walk(match, subrepos, listunknown, listignored, full=full)
1374 1364 ):
1375 1365 if not dcontains(fn):
1376 1366 if (listignored or mexact(fn)) and dirignore(fn):
1377 1367 if listignored:
1378 1368 iadd(fn)
1379 1369 else:
1380 1370 uadd(fn)
1381 1371 continue
1382 1372
1383 1373 t = dget(fn)
1384 1374 mode = t.mode
1385 1375 size = t.size
1386 1376
1387 1377 if not st and t.tracked:
1388 1378 dadd(fn)
1389 1379 elif t.p2_info:
1390 1380 madd(fn)
1391 1381 elif t.added:
1392 1382 aadd(fn)
1393 1383 elif t.removed:
1394 1384 radd(fn)
1395 1385 elif t.tracked:
1396 1386 if not checklink and t.has_fallback_symlink:
1397 1387 # If the file system does not support symlink, the mode
1398 1388 # might not be correctly stored in the dirstate, so do not
1399 1389 # trust it.
1400 1390 ladd(fn)
1401 1391 elif not checkexec and t.has_fallback_exec:
1402 1392 # If the file system does not support exec bits, the mode
1403 1393 # might not be correctly stored in the dirstate, so do not
1404 1394 # trust it.
1405 1395 ladd(fn)
1406 1396 elif (
1407 1397 size >= 0
1408 1398 and (
1409 1399 (size != st.st_size and size != st.st_size & _rangemask)
1410 1400 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1411 1401 )
1412 1402 or fn in copymap
1413 1403 ):
1414 1404 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1415 1405 # issue6456: Size returned may be longer due to
1416 1406 # encryption on EXT-4 fscrypt, undecided.
1417 1407 ladd(fn)
1418 1408 else:
1419 1409 madd(fn)
1420 1410 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1421 1411 ladd(fn)
1422 1412 elif timestamp.mtime_of(st) == lastnormaltime:
1423 1413 # fn may have just been marked as normal and it may have
1424 1414 # changed in the same second without changing its size.
1425 1415 # This can happen if we quickly do multiple commits.
1426 1416 # Force lookup, so we don't miss such a racy file change.
1427 1417 ladd(fn)
1428 1418 elif listclean:
1429 1419 cadd(fn)
1430 1420 status = scmutil.status(
1431 1421 modified, added, removed, deleted, unknown, ignored, clean
1432 1422 )
1433 1423 return (lookup, status)
1434 1424
1435 1425 def matches(self, match):
1436 1426 """
1437 1427 return files in the dirstate (in whatever state) filtered by match
1438 1428 """
1439 1429 dmap = self._map
1440 1430 if rustmod is not None:
1441 1431 dmap = self._map._map
1442 1432
1443 1433 if match.always():
1444 1434 return dmap.keys()
1445 1435 files = match.files()
1446 1436 if match.isexact():
1447 1437 # fast path -- filter the other way around, since typically files is
1448 1438 # much smaller than dmap
1449 1439 return [f for f in files if f in dmap]
1450 1440 if match.prefix() and all(fn in dmap for fn in files):
1451 1441 # fast path -- all the values are known to be files, so just return
1452 1442 # that
1453 1443 return list(files)
1454 1444 return [f for f in dmap if match(f)]
1455 1445
1456 1446 def _actualfilename(self, tr):
1457 1447 if tr:
1458 1448 return self._pendingfilename
1459 1449 else:
1460 1450 return self._filename
1461 1451
1462 1452 def savebackup(self, tr, backupname):
1463 1453 '''Save current dirstate into backup file'''
1464 1454 filename = self._actualfilename(tr)
1465 1455 assert backupname != filename
1466 1456
1467 1457 # use '_writedirstate' instead of 'write' to write changes certainly,
1468 1458 # because the latter omits writing out if transaction is running.
1469 1459 # output file will be used to create backup of dirstate at this point.
1470 1460 if self._dirty or not self._opener.exists(filename):
1471 1461 self._writedirstate(
1472 1462 tr,
1473 1463 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1474 1464 )
1475 1465
1476 1466 if tr:
1477 1467 # ensure that subsequent tr.writepending returns True for
1478 1468 # changes written out above, even if dirstate is never
1479 1469 # changed after this
1480 1470 tr.addfilegenerator(
1481 1471 b'dirstate',
1482 1472 (self._filename,),
1483 1473 lambda f: self._writedirstate(tr, f),
1484 1474 location=b'plain',
1485 1475 )
1486 1476
1487 1477 # ensure that pending file written above is unlinked at
1488 1478 # failure, even if tr.writepending isn't invoked until the
1489 1479 # end of this transaction
1490 1480 tr.registertmp(filename, location=b'plain')
1491 1481
1492 1482 self._opener.tryunlink(backupname)
1493 1483 # hardlink backup is okay because _writedirstate is always called
1494 1484 # with an "atomictemp=True" file.
1495 1485 util.copyfile(
1496 1486 self._opener.join(filename),
1497 1487 self._opener.join(backupname),
1498 1488 hardlink=True,
1499 1489 )
1500 1490
1501 1491 def restorebackup(self, tr, backupname):
1502 1492 '''Restore dirstate by backup file'''
1503 1493 # this "invalidate()" prevents "wlock.release()" from writing
1504 1494 # changes of dirstate out after restoring from backup file
1505 1495 self.invalidate()
1506 1496 filename = self._actualfilename(tr)
1507 1497 o = self._opener
1508 1498 if util.samefile(o.join(backupname), o.join(filename)):
1509 1499 o.unlink(backupname)
1510 1500 else:
1511 1501 o.rename(backupname, filename, checkambig=True)
1512 1502
1513 1503 def clearbackup(self, tr, backupname):
1514 1504 '''Clear backup file'''
1515 1505 self._opener.unlink(backupname)
1516 1506
1517 1507 def verify(self, m1, m2):
 1518 1508         """check the dirstate content against the parent manifest and yield errors"""
1519 1509 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1520 1510 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1521 1511 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1522 1512 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1523 1513 for f, entry in self.items():
1524 1514 state = entry.state
1525 1515 if state in b"nr" and f not in m1:
1526 1516 yield (missing_from_p1, f, state)
1527 1517 if state in b"a" and f in m1:
1528 1518 yield (unexpected_in_p1, f, state)
1529 1519 if state in b"m" and f not in m1 and f not in m2:
1530 1520 yield (missing_from_ps, f, state)
1531 1521 for f in m1:
1532 1522 state = self.get_entry(f).state
1533 1523 if state not in b"nrm":
1534 1524 yield (missing_from_ds, f, state)
@@ -1,87 +1,101 b''
1 1 # Copyright Mercurial Contributors
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 from __future__ import absolute_import
7 7
8 8 import functools
9 import os
9 10 import stat
10 11
11 12
12 13 rangemask = 0x7FFFFFFF
13 14
14 15
15 16 @functools.total_ordering
16 17 class timestamp(tuple):
17 18 """
18 19 A Unix timestamp with optional nanoseconds precision,
19 20 modulo 2**31 seconds.
20 21
21 22 A 2-tuple containing:
22 23
23 24 `truncated_seconds`: seconds since the Unix epoch,
24 25 truncated to its lower 31 bits
25 26
26 27 `subsecond_nanoseconds`: number of nanoseconds since `truncated_seconds`.
27 28 When this is zero, the sub-second precision is considered unknown.
28 29 """
29 30
30 31 def __new__(cls, value):
31 32 truncated_seconds, subsec_nanos = value
32 33 value = (truncated_seconds & rangemask, subsec_nanos)
33 34 return super(timestamp, cls).__new__(cls, value)
34 35
35 36 def __eq__(self, other):
36 37 self_secs, self_subsec_nanos = self
37 38 other_secs, other_subsec_nanos = other
38 39 return self_secs == other_secs and (
39 40 self_subsec_nanos == other_subsec_nanos
40 41 or self_subsec_nanos == 0
41 42 or other_subsec_nanos == 0
42 43 )
43 44
44 45 def __gt__(self, other):
45 46 self_secs, self_subsec_nanos = self
46 47 other_secs, other_subsec_nanos = other
47 48 if self_secs > other_secs:
48 49 return True
49 50 if self_secs < other_secs:
50 51 return False
51 52 if self_subsec_nanos == 0 or other_subsec_nanos == 0:
52 53 # they are considered equal, so not "greater than"
53 54 return False
54 55 return self_subsec_nanos > other_subsec_nanos
55 56
56 57
58 def get_fs_now(vfs):
59 """return a timestamp for "now" in the current vfs
60
61 This will raise an exception if no temporary files could be created.
62 """
63 tmpfd, tmpname = vfs.mkstemp()
64 try:
65 return mtime_of(os.fstat(tmpfd))
66 finally:
67 os.close(tmpfd)
68 vfs.unlink(tmpname)
69
70
57 71 def zero():
58 72 """
59 73 Returns the `timestamp` at the Unix epoch.
60 74 """
61 75 return tuple.__new__(timestamp, (0, 0))
62 76
63 77
64 78 def mtime_of(stat_result):
65 79 """
66 80 Takes an `os.stat_result`-like object and returns a `timestamp` object
67 81 for its modification time.
68 82 """
69 83 try:
70 84 # TODO: add this attribute to `osutil.stat` objects,
71 85 # see `mercurial/cext/osutil.c`.
72 86 #
73 87 # This attribute is also not available on Python 2.
74 88 nanos = stat_result.st_mtime_ns
75 89 except AttributeError:
76 90 # https://docs.python.org/2/library/os.html#os.stat_float_times
77 91 # "For compatibility with older Python versions,
78 92 # accessing stat_result as a tuple always returns integers."
79 93 secs = stat_result[stat.ST_MTIME]
80 94
81 95 subsec_nanos = 0
82 96 else:
83 97 billion = int(1e9)
84 98 secs = nanos // billion
85 99 subsec_nanos = nanos % billion
86 100
87 101 return timestamp((secs, subsec_nanos))
@@ -1,107 +1,106 b''
1 1 # extension to emulate invoking 'dirstate.write()' at the time
2 2 # specified by '[fakedirstatewritetime] fakenow', only when
3 3 # 'dirstate.write()' is invoked via functions below:
4 4 #
5 5 # - 'workingctx._poststatusfixup()' (= 'repo.status()')
6 6 # - 'committablectx.markcommitted()'
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from mercurial import (
11 11 context,
12 dirstate,
13 12 dirstatemap as dirstatemapmod,
14 13 extensions,
15 14 policy,
16 15 registrar,
17 16 )
18 17 from mercurial.dirstateutils import timestamp
19 18 from mercurial.utils import dateutil
20 19
21 20 try:
22 21 from mercurial import rustext
23 22
24 23 rustext.__name__ # force actual import (see hgdemandimport)
25 24 except ImportError:
26 25 rustext = None
27 26
28 27 configtable = {}
29 28 configitem = registrar.configitem(configtable)
30 29
31 30 configitem(
32 31 b'fakedirstatewritetime',
33 32 b'fakenow',
34 33 default=None,
35 34 )
36 35
37 36 parsers = policy.importmod('parsers')
38 37 has_rust_dirstate = policy.importrust('dirstate') is not None
39 38
40 39
41 40 def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
42 41 # execute what original parsers.pack_dirstate should do actually
43 42 # for consistency
44 43 for f, e in dmap.items():
45 44 if e.need_delay(now):
46 45 e.set_possibly_dirty()
47 46
48 47 return orig(dmap, copymap, pl, fakenow)
49 48
50 49
51 50 def fakewrite(ui, func):
52 51 # fake "now" of 'pack_dirstate' only if it is invoked while 'func'
53 52
54 53 fakenow = ui.config(b'fakedirstatewritetime', b'fakenow')
55 54 if not fakenow:
56 55 # Execute original one, if fakenow isn't configured. This is
57 56 # useful to prevent subrepos from executing replaced one,
58 57 # because replacing 'parsers.pack_dirstate' is also effective
59 58 # in subrepos.
60 59 return func()
61 60
62 61 # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
63 62 # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
64 63 fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
65 64 fakenow = timestamp.timestamp((fakenow, 0))
66 65
67 66 if has_rust_dirstate:
68 67 # The Rust implementation does not use public parse/pack dirstate
69 68 # to prevent conversion round-trips
70 69 orig_dirstatemap_write = dirstatemapmod.dirstatemap.write
71 70 wrapper = lambda self, tr, st, now: orig_dirstatemap_write(
72 71 self, tr, st, fakenow
73 72 )
74 73 dirstatemapmod.dirstatemap.write = wrapper
75 74
76 orig_dirstate_getfsnow = dirstate._getfsnow
75 orig_get_fs_now = timestamp.get_fs_now
77 76 wrapper = lambda *args: pack_dirstate(fakenow, orig_pack_dirstate, *args)
78 77
79 78 orig_module = parsers
80 79 orig_pack_dirstate = parsers.pack_dirstate
81 80
82 81 orig_module.pack_dirstate = wrapper
83 dirstate._getfsnow = lambda *args: fakenow
82 timestamp.get_fs_now = lambda *args: fakenow
84 83 try:
85 84 return func()
86 85 finally:
87 86 orig_module.pack_dirstate = orig_pack_dirstate
88 dirstate._getfsnow = orig_dirstate_getfsnow
87 timestamp.get_fs_now = orig_get_fs_now
89 88 if has_rust_dirstate:
90 89 dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
91 90
92 91
93 92 def _poststatusfixup(orig, workingctx, status, fixup):
94 93 ui = workingctx.repo().ui
95 94 return fakewrite(ui, lambda: orig(workingctx, status, fixup))
96 95
97 96
98 97 def markcommitted(orig, committablectx, node):
99 98 ui = committablectx.repo().ui
100 99 return fakewrite(ui, lambda: orig(committablectx, node))
101 100
102 101
103 102 def extsetup(ui):
104 103 extensions.wrapfunction(
105 104 context.workingctx, '_poststatusfixup', _poststatusfixup
106 105 )
107 106 extensions.wrapfunction(context.workingctx, 'markcommitted', markcommitted)
General Comments 0
You need to be logged in to leave comments. Login now