##// END OF EJS Templates
dirstate: drop an incorrect comment...
marmoute -
r48958:de0977ec default
parent child Browse files
Show More
@@ -1,1517 +1,1514 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43 43
44 44 propertycache = util.propertycache
45 45 filecache = scmutil.filecache
46 46 _rangemask = dirstatemap.rangemask
47 47
48 48 DirstateItem = dirstatemap.DirstateItem
49 49
50 50
class repocache(filecache):
    """A filecache specialization for files that live under ``.hg/``."""

    def join(self, obj, fname):
        # resolve fname through the repository's .hg-level opener
        return obj._opener.join(fname)
56 56
57 57
class rootcache(filecache):
    """A filecache specialization for files that live in the repo root."""

    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
63 63
64 64
def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    # create a scratch file so we can read the filesystem's own clock
    # (its mtime granularity may differ from time.time())
    fd, scratch = vfs.mkstemp()
    try:
        return os.fstat(fd)[stat.ST_MTIME]
    finally:
        os.close(fd)
        vfs.unlink(scratch)
73 73
74 74
def requires_parents_change(func):
    """Decorator: ``func`` may only be called inside a parentchange context.

    Raises error.ProgrammingError otherwise.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
84 84
85 85
def requires_no_parents_change(func):
    """Decorator: ``func`` must NOT be called inside a parentchange context.

    Raises error.ProgrammingError otherwise.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95 95
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
99 99 def __init__(
100 100 self,
101 101 opener,
102 102 ui,
103 103 root,
104 104 validate,
105 105 sparsematchfn,
106 106 nodeconstants,
107 107 use_dirstate_v2,
108 108 ):
109 109 """Create a new dirstate object.
110 110
111 111 opener is an open()-like callable that can be used to open the
112 112 dirstate file; root is the root of the directory tracked by
113 113 the dirstate.
114 114 """
115 115 self._use_dirstate_v2 = use_dirstate_v2
116 116 self._nodeconstants = nodeconstants
117 117 self._opener = opener
118 118 self._validate = validate
119 119 self._root = root
120 120 self._sparsematchfn = sparsematchfn
121 121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 122 # UNC path pointing to root share (issue4557)
123 123 self._rootdir = pathutil.normasprefix(root)
124 124 self._dirty = False
125 125 self._lastnormaltime = 0
126 126 self._ui = ui
127 127 self._filecache = {}
128 128 self._parentwriters = 0
129 129 self._filename = b'dirstate'
130 130 self._pendingfilename = b'%s.pending' % self._filename
131 131 self._plchangecallbacks = {}
132 132 self._origpl = None
133 133 self._mapcls = dirstatemap.dirstatemap
134 134 # Access and cache cwd early, so we don't access it for the first time
135 135 # after a working-copy update caused it to not exist (accessing it then
136 136 # raises an exception).
137 137 self._cwd
138 138
139 139 def prefetch_parents(self):
140 140 """make sure the parents are loaded
141 141
142 142 Used to avoid a race condition.
143 143 """
144 144 self._pl
145 145
146 146 @contextlib.contextmanager
147 147 def parentchange(self):
148 148 """Context manager for handling dirstate parents.
149 149
150 150 If an exception occurs in the scope of the context manager,
151 151 the incoherent dirstate won't be written when wlock is
152 152 released.
153 153 """
154 154 self._parentwriters += 1
155 155 yield
156 156 # Typically we want the "undo" step of a context manager in a
157 157 # finally block so it happens even when an exception
158 158 # occurs. In this case, however, we only want to decrement
159 159 # parentwriters if the code in the with statement exits
160 160 # normally, so we don't have a try/finally here on purpose.
161 161 self._parentwriters -= 1
162 162
163 163 def pendingparentchange(self):
164 164 """Returns true if the dirstate is in the middle of a set of changes
165 165 that modify the dirstate parent.
166 166 """
167 167 return self._parentwriters > 0
168 168
169 169 @propertycache
170 170 def _map(self):
171 171 """Return the dirstate contents (see documentation for dirstatemap)."""
172 172 self._map = self._mapcls(
173 173 self._ui,
174 174 self._opener,
175 175 self._root,
176 176 self._nodeconstants,
177 177 self._use_dirstate_v2,
178 178 )
179 179 return self._map
180 180
181 181 @property
182 182 def _sparsematcher(self):
183 183 """The matcher for the sparse checkout.
184 184
185 185 The working directory may not include every file from a manifest. The
186 186 matcher obtained by this property will match a path if it is to be
187 187 included in the working directory.
188 188 """
189 189 # TODO there is potential to cache this property. For now, the matcher
190 190 # is resolved on every access. (But the called function does use a
191 191 # cache to keep the lookup fast.)
192 192 return self._sparsematchfn()
193 193
194 194 @repocache(b'branch')
195 195 def _branch(self):
196 196 try:
197 197 return self._opener.read(b"branch").strip() or b"default"
198 198 except IOError as inst:
199 199 if inst.errno != errno.ENOENT:
200 200 raise
201 201 return b"default"
202 202
203 203 @property
204 204 def _pl(self):
205 205 return self._map.parents()
206 206
207 207 def hasdir(self, d):
208 208 return self._map.hastrackeddir(d)
209 209
210 210 @rootcache(b'.hgignore')
211 211 def _ignore(self):
212 212 files = self._ignorefiles()
213 213 if not files:
214 214 return matchmod.never()
215 215
216 216 pats = [b'include:%s' % f for f in files]
217 217 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 218
219 219 @propertycache
220 220 def _slash(self):
221 221 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 222
223 223 @propertycache
224 224 def _checklink(self):
225 225 return util.checklink(self._root)
226 226
227 227 @propertycache
228 228 def _checkexec(self):
229 229 return bool(util.checkexec(self._root))
230 230
231 231 @propertycache
232 232 def _checkcase(self):
233 233 return not util.fscasesensitive(self._join(b'.hg'))
234 234
235 235 def _join(self, f):
236 236 # much faster than os.path.join()
237 237 # it's safe because f is always a relative path
238 238 return self._rootdir + f
239 239
240 240 def flagfunc(self, buildfallback):
241 241 if self._checklink and self._checkexec:
242 242
243 243 def f(x):
244 244 try:
245 245 st = os.lstat(self._join(x))
246 246 if util.statislink(st):
247 247 return b'l'
248 248 if util.statisexec(st):
249 249 return b'x'
250 250 except OSError:
251 251 pass
252 252 return b''
253 253
254 254 return f
255 255
256 256 fallback = buildfallback()
257 257 if self._checklink:
258 258
259 259 def f(x):
260 260 if os.path.islink(self._join(x)):
261 261 return b'l'
262 262 if b'x' in fallback(x):
263 263 return b'x'
264 264 return b''
265 265
266 266 return f
267 267 if self._checkexec:
268 268
269 269 def f(x):
270 270 if b'l' in fallback(x):
271 271 return b'l'
272 272 if util.isexec(self._join(x)):
273 273 return b'x'
274 274 return b''
275 275
276 276 return f
277 277 else:
278 278 return fallback
279 279
280 280 @propertycache
281 281 def _cwd(self):
282 282 # internal config: ui.forcecwd
283 283 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 284 if forcecwd:
285 285 return forcecwd
286 286 return encoding.getcwd()
287 287
288 288 def getcwd(self):
289 289 """Return the path from which a canonical path is calculated.
290 290
291 291 This path should be used to resolve file patterns or to convert
292 292 canonical paths back to file paths for display. It shouldn't be
293 293 used to get real file paths. Use vfs functions instead.
294 294 """
295 295 cwd = self._cwd
296 296 if cwd == self._root:
297 297 return b''
298 298 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 299 rootsep = self._root
300 300 if not util.endswithsep(rootsep):
301 301 rootsep += pycompat.ossep
302 302 if cwd.startswith(rootsep):
303 303 return cwd[len(rootsep) :]
304 304 else:
305 305 # we're outside the repo. return an absolute path.
306 306 return cwd
307 307
308 308 def pathto(self, f, cwd=None):
309 309 if cwd is None:
310 310 cwd = self.getcwd()
311 311 path = util.pathto(self._root, cwd, f)
312 312 if self._slash:
313 313 return util.pconvert(path)
314 314 return path
315 315
316 316 def __getitem__(self, key):
317 317 """Return the current state of key (a filename) in the dirstate.
318 318
319 319 States are:
320 320 n normal
321 321 m needs merging
322 322 r marked for removal
323 323 a marked for addition
324 324 ? not tracked
325 325
326 326 XXX The "state" is a bit obscure to be in the "public" API. we should
327 327 consider migrating all user of this to going through the dirstate entry
328 328 instead.
329 329 """
330 330 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
331 331 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
332 332 entry = self._map.get(key)
333 333 if entry is not None:
334 334 return entry.state
335 335 return b'?'
336 336
337 337 def get_entry(self, path):
338 338 """return a DirstateItem for the associated path"""
339 339 entry = self._map.get(path)
340 340 if entry is None:
341 341 return DirstateItem()
342 342 return entry
343 343
344 344 def __contains__(self, key):
345 345 return key in self._map
346 346
347 347 def __iter__(self):
348 348 return iter(sorted(self._map))
349 349
350 350 def items(self):
351 351 return pycompat.iteritems(self._map)
352 352
353 353 iteritems = items
354 354
355 355 def parents(self):
356 356 return [self._validate(p) for p in self._pl]
357 357
358 358 def p1(self):
359 359 return self._validate(self._pl[0])
360 360
361 361 def p2(self):
362 362 return self._validate(self._pl[1])
363 363
364 364 @property
365 365 def in_merge(self):
366 366 """True if a merge is in progress"""
367 367 return self._pl[1] != self._nodeconstants.nullid
368 368
369 369 def branch(self):
370 370 return encoding.tolocal(self._branch)
371 371
372 372 def setparents(self, p1, p2=None):
373 373 """Set dirstate parents to p1 and p2.
374 374
375 375 When moving from two parents to one, "merged" entries a
376 376 adjusted to normal and previous copy records discarded and
377 377 returned by the call.
378 378
379 379 See localrepo.setparents()
380 380 """
381 381 if p2 is None:
382 382 p2 = self._nodeconstants.nullid
383 383 if self._parentwriters == 0:
384 384 raise ValueError(
385 385 b"cannot set dirstate parent outside of "
386 386 b"dirstate.parentchange context manager"
387 387 )
388 388
389 389 self._dirty = True
390 390 oldp2 = self._pl[1]
391 391 if self._origpl is None:
392 392 self._origpl = self._pl
393 393 nullid = self._nodeconstants.nullid
394 394 # True if we need to fold p2 related state back to a linear case
395 395 fold_p2 = oldp2 != nullid and p2 == nullid
396 396 return self._map.setparents(p1, p2, fold_p2=fold_p2)
397 397
398 398 def setbranch(self, branch):
399 399 self.__class__._branch.set(self, encoding.fromlocal(branch))
400 400 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
401 401 try:
402 402 f.write(self._branch + b'\n')
403 403 f.close()
404 404
405 405 # make sure filecache has the correct stat info for _branch after
406 406 # replacing the underlying file
407 407 ce = self._filecache[b'_branch']
408 408 if ce:
409 409 ce.refresh()
410 410 except: # re-raises
411 411 f.discard()
412 412 raise
413 413
414 414 def invalidate(self):
415 415 """Causes the next access to reread the dirstate.
416 416
417 417 This is different from localrepo.invalidatedirstate() because it always
418 418 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
419 419 check whether the dirstate has changed before rereading it."""
420 420
421 421 for a in ("_map", "_branch", "_ignore"):
422 422 if a in self.__dict__:
423 423 delattr(self, a)
424 424 self._lastnormaltime = 0
425 425 self._dirty = False
426 426 self._parentwriters = 0
427 427 self._origpl = None
428 428
429 429 def copy(self, source, dest):
430 430 """Mark dest as a copy of source. Unmark dest if source is None."""
431 431 if source == dest:
432 432 return
433 433 self._dirty = True
434 434 if source is not None:
435 435 self._map.copymap[dest] = source
436 436 else:
437 437 self._map.copymap.pop(dest, None)
438 438
439 439 def copied(self, file):
440 440 return self._map.copymap.get(file, None)
441 441
442 442 def copies(self):
443 443 return self._map.copymap
444 444
445 445 @requires_no_parents_change
446 446 def set_tracked(self, filename):
447 447 """a "public" method for generic code to mark a file as tracked
448 448
449 449 This function is to be called outside of "update/merge" case. For
450 450 example by a command like `hg add X`.
451 451
452 452 return True the file was previously untracked, False otherwise.
453 453 """
454 454 self._dirty = True
455 455 entry = self._map.get(filename)
456 456 if entry is None or not entry.tracked:
457 457 self._check_new_tracked_filename(filename)
458 458 return self._map.set_tracked(filename)
459 459
460 460 @requires_no_parents_change
461 461 def set_untracked(self, filename):
462 462 """a "public" method for generic code to mark a file as untracked
463 463
464 464 This function is to be called outside of "update/merge" case. For
465 465 example by a command like `hg remove X`.
466 466
467 467 return True the file was previously tracked, False otherwise.
468 468 """
469 469 ret = self._map.set_untracked(filename)
470 470 if ret:
471 471 self._dirty = True
472 472 return ret
473 473
474 474 @requires_no_parents_change
475 475 def set_clean(self, filename, parentfiledata=None):
476 476 """record that the current state of the file on disk is known to be clean"""
477 477 self._dirty = True
478 478 if parentfiledata:
479 479 (mode, size, mtime) = parentfiledata
480 480 else:
481 481 (mode, size, mtime) = self._get_filedata(filename)
482 482 if not self._map[filename].tracked:
483 483 self._check_new_tracked_filename(filename)
484 484 self._map.set_clean(filename, mode, size, mtime)
485 485 if mtime > self._lastnormaltime:
486 486 # Remember the most recent modification timeslot for status(),
487 487 # to make sure we won't miss future size-preserving file content
488 488 # modifications that happen within the same timeslot.
489 489 self._lastnormaltime = mtime
490 490
491 491 @requires_no_parents_change
492 492 def set_possibly_dirty(self, filename):
493 493 """record that the current state of the file on disk is unknown"""
494 494 self._dirty = True
495 495 self._map.set_possibly_dirty(filename)
496 496
497 497 @requires_parents_change
498 498 def update_file_p1(
499 499 self,
500 500 filename,
501 501 p1_tracked,
502 502 ):
503 503 """Set a file as tracked in the parent (or not)
504 504
505 505 This is to be called when adjust the dirstate to a new parent after an history
506 506 rewriting operation.
507 507
508 508 It should not be called during a merge (p2 != nullid) and only within
509 509 a `with dirstate.parentchange():` context.
510 510 """
511 511 if self.in_merge:
512 512 msg = b'update_file_reference should not be called when merging'
513 513 raise error.ProgrammingError(msg)
514 514 entry = self._map.get(filename)
515 515 if entry is None:
516 516 wc_tracked = False
517 517 else:
518 518 wc_tracked = entry.tracked
519 519 if not (p1_tracked or wc_tracked):
520 520 # the file is no longer relevant to anyone
521 521 if self._map.get(filename) is not None:
522 522 self._map.reset_state(filename)
523 523 self._dirty = True
524 524 elif (not p1_tracked) and wc_tracked:
525 525 if entry is not None and entry.added:
526 526 return # avoid dropping copy information (maybe?)
527 527
528 528 parentfiledata = None
529 529 if wc_tracked and p1_tracked:
530 530 parentfiledata = self._get_filedata(filename)
531 531
532 532 self._map.reset_state(
533 533 filename,
534 534 wc_tracked,
535 535 p1_tracked,
536 536 # the underlying reference might have changed, we will have to
537 537 # check it.
538 538 has_meaningful_mtime=False,
539 539 parentfiledata=parentfiledata,
540 540 )
541 541 if (
542 542 parentfiledata is not None
543 543 and parentfiledata[2] > self._lastnormaltime
544 544 ):
545 545 # Remember the most recent modification timeslot for status(),
546 546 # to make sure we won't miss future size-preserving file content
547 547 # modifications that happen within the same timeslot.
548 548 self._lastnormaltime = parentfiledata[2]
549 549
550 550 @requires_parents_change
551 551 def update_file(
552 552 self,
553 553 filename,
554 554 wc_tracked,
555 555 p1_tracked,
556 556 p2_info=False,
557 557 possibly_dirty=False,
558 558 parentfiledata=None,
559 559 ):
560 560 """update the information about a file in the dirstate
561 561
562 562 This is to be called when the direstates parent changes to keep track
563 563 of what is the file situation in regards to the working copy and its parent.
564 564
565 565 This function must be called within a `dirstate.parentchange` context.
566 566
567 567 note: the API is at an early stage and we might need to adjust it
568 568 depending of what information ends up being relevant and useful to
569 569 other processing.
570 570 """
571 571
572 572 # note: I do not think we need to double check name clash here since we
573 573 # are in a update/merge case that should already have taken care of
574 574 # this. The test agrees
575 575
576 576 self._dirty = True
577 577
578 578 need_parent_file_data = (
579 579 not possibly_dirty and not p2_info and wc_tracked and p1_tracked
580 580 )
581 581
582 # this mean we are doing call for file we do not really care about the
583 # data (eg: added or removed), however this should be a minor overhead
584 # compared to the overall update process calling this.
585 if need_parent_file_data or parentfiledata is None:
582 if need_parent_file_data and parentfiledata is None:
586 583 parentfiledata = self._get_filedata(filename)
587 584
588 585 self._map.reset_state(
589 586 filename,
590 587 wc_tracked,
591 588 p1_tracked,
592 589 p2_info=p2_info,
593 590 has_meaningful_mtime=not possibly_dirty,
594 591 parentfiledata=parentfiledata,
595 592 )
596 593 if (
597 594 parentfiledata is not None
598 595 and parentfiledata[2] > self._lastnormaltime
599 596 ):
600 597 # Remember the most recent modification timeslot for status(),
601 598 # to make sure we won't miss future size-preserving file content
602 599 # modifications that happen within the same timeslot.
603 600 self._lastnormaltime = parentfiledata[2]
604 601
605 602 def _check_new_tracked_filename(self, filename):
606 603 scmutil.checkfilename(filename)
607 604 if self._map.hastrackeddir(filename):
608 605 msg = _(b'directory %r already in dirstate')
609 606 msg %= pycompat.bytestr(filename)
610 607 raise error.Abort(msg)
611 608 # shadows
612 609 for d in pathutil.finddirs(filename):
613 610 if self._map.hastrackeddir(d):
614 611 break
615 612 entry = self._map.get(d)
616 613 if entry is not None and not entry.removed:
617 614 msg = _(b'file %r in dirstate clashes with %r')
618 615 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
619 616 raise error.Abort(msg)
620 617
621 618 def _get_filedata(self, filename):
622 619 """returns"""
623 620 s = os.lstat(self._join(filename))
624 621 mode = s.st_mode
625 622 size = s.st_size
626 623 mtime = s[stat.ST_MTIME]
627 624 return (mode, size, mtime)
628 625
629 626 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
630 627 if exists is None:
631 628 exists = os.path.lexists(os.path.join(self._root, path))
632 629 if not exists:
633 630 # Maybe a path component exists
634 631 if not ignoremissing and b'/' in path:
635 632 d, f = path.rsplit(b'/', 1)
636 633 d = self._normalize(d, False, ignoremissing, None)
637 634 folded = d + b"/" + f
638 635 else:
639 636 # No path components, preserve original case
640 637 folded = path
641 638 else:
642 639 # recursively normalize leading directory components
643 640 # against dirstate
644 641 if b'/' in normed:
645 642 d, f = normed.rsplit(b'/', 1)
646 643 d = self._normalize(d, False, ignoremissing, True)
647 644 r = self._root + b"/" + d
648 645 folded = d + b"/" + util.fspath(f, r)
649 646 else:
650 647 folded = util.fspath(normed, self._root)
651 648 storemap[normed] = folded
652 649
653 650 return folded
654 651
655 652 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
656 653 normed = util.normcase(path)
657 654 folded = self._map.filefoldmap.get(normed, None)
658 655 if folded is None:
659 656 if isknown:
660 657 folded = path
661 658 else:
662 659 folded = self._discoverpath(
663 660 path, normed, ignoremissing, exists, self._map.filefoldmap
664 661 )
665 662 return folded
666 663
667 664 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
668 665 normed = util.normcase(path)
669 666 folded = self._map.filefoldmap.get(normed, None)
670 667 if folded is None:
671 668 folded = self._map.dirfoldmap.get(normed, None)
672 669 if folded is None:
673 670 if isknown:
674 671 folded = path
675 672 else:
676 673 # store discovered result in dirfoldmap so that future
677 674 # normalizefile calls don't start matching directories
678 675 folded = self._discoverpath(
679 676 path, normed, ignoremissing, exists, self._map.dirfoldmap
680 677 )
681 678 return folded
682 679
683 680 def normalize(self, path, isknown=False, ignoremissing=False):
684 681 """
685 682 normalize the case of a pathname when on a casefolding filesystem
686 683
687 684 isknown specifies whether the filename came from walking the
688 685 disk, to avoid extra filesystem access.
689 686
690 687 If ignoremissing is True, missing path are returned
691 688 unchanged. Otherwise, we try harder to normalize possibly
692 689 existing path components.
693 690
694 691 The normalized case is determined based on the following precedence:
695 692
696 693 - version of name already stored in the dirstate
697 694 - version of name stored on disk
698 695 - version provided via command arguments
699 696 """
700 697
701 698 if self._checkcase:
702 699 return self._normalize(path, isknown, ignoremissing)
703 700 return path
704 701
705 702 def clear(self):
706 703 self._map.clear()
707 704 self._lastnormaltime = 0
708 705 self._dirty = True
709 706
710 707 def rebuild(self, parent, allfiles, changedfiles=None):
711 708 if changedfiles is None:
712 709 # Rebuild entire dirstate
713 710 to_lookup = allfiles
714 711 to_drop = []
715 712 lastnormaltime = self._lastnormaltime
716 713 self.clear()
717 714 self._lastnormaltime = lastnormaltime
718 715 elif len(changedfiles) < 10:
719 716 # Avoid turning allfiles into a set, which can be expensive if it's
720 717 # large.
721 718 to_lookup = []
722 719 to_drop = []
723 720 for f in changedfiles:
724 721 if f in allfiles:
725 722 to_lookup.append(f)
726 723 else:
727 724 to_drop.append(f)
728 725 else:
729 726 changedfilesset = set(changedfiles)
730 727 to_lookup = changedfilesset & set(allfiles)
731 728 to_drop = changedfilesset - to_lookup
732 729
733 730 if self._origpl is None:
734 731 self._origpl = self._pl
735 732 self._map.setparents(parent, self._nodeconstants.nullid)
736 733
737 734 for f in to_lookup:
738 735
739 736 if self.in_merge:
740 737 self.set_tracked(f)
741 738 else:
742 739 self._map.reset_state(
743 740 f,
744 741 wc_tracked=True,
745 742 p1_tracked=True,
746 743 )
747 744 for f in to_drop:
748 745 self._map.reset_state(f)
749 746
750 747 self._dirty = True
751 748
752 749 def identity(self):
753 750 """Return identity of dirstate itself to detect changing in storage
754 751
755 752 If identity of previous dirstate is equal to this, writing
756 753 changes based on the former dirstate out can keep consistency.
757 754 """
758 755 return self._map.identity
759 756
760 757 def write(self, tr):
761 758 if not self._dirty:
762 759 return
763 760
764 761 filename = self._filename
765 762 if tr:
766 763 # 'dirstate.write()' is not only for writing in-memory
767 764 # changes out, but also for dropping ambiguous timestamp.
768 765 # delayed writing re-raise "ambiguous timestamp issue".
769 766 # See also the wiki page below for detail:
770 767 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
771 768
772 769 # record when mtime start to be ambiguous
773 770 now = _getfsnow(self._opener)
774 771
775 772 # delay writing in-memory changes out
776 773 tr.addfilegenerator(
777 774 b'dirstate',
778 775 (self._filename,),
779 776 lambda f: self._writedirstate(tr, f, now=now),
780 777 location=b'plain',
781 778 )
782 779 return
783 780
784 781 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
785 782 self._writedirstate(tr, st)
786 783
787 784 def addparentchangecallback(self, category, callback):
788 785 """add a callback to be called when the wd parents are changed
789 786
790 787 Callback will be called with the following arguments:
791 788 dirstate, (oldp1, oldp2), (newp1, newp2)
792 789
793 790 Category is a unique identifier to allow overwriting an old callback
794 791 with a newer callback.
795 792 """
796 793 self._plchangecallbacks[category] = callback
797 794
798 795 def _writedirstate(self, tr, st, now=None):
799 796 # notify callbacks about parents change
800 797 if self._origpl is not None and self._origpl != self._pl:
801 798 for c, callback in sorted(
802 799 pycompat.iteritems(self._plchangecallbacks)
803 800 ):
804 801 callback(self, self._origpl, self._pl)
805 802 self._origpl = None
806 803
807 804 if now is None:
808 805 # use the modification time of the newly created temporary file as the
809 806 # filesystem's notion of 'now'
810 807 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
811 808
812 809 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
813 810 # timestamp of each entries in dirstate, because of 'now > mtime'
814 811 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
815 812 if delaywrite > 0:
816 813 # do we have any files to delay for?
817 814 for f, e in pycompat.iteritems(self._map):
818 815 if e.need_delay(now):
819 816 import time # to avoid useless import
820 817
821 818 # rather than sleep n seconds, sleep until the next
822 819 # multiple of n seconds
823 820 clock = time.time()
824 821 start = int(clock) - (int(clock) % delaywrite)
825 822 end = start + delaywrite
826 823 time.sleep(end - clock)
827 824 now = end # trust our estimate that the end is near now
828 825 break
829 826
830 827 self._map.write(tr, st, now)
831 828 self._lastnormaltime = 0
832 829 self._dirty = False
833 830
834 831 def _dirignore(self, f):
835 832 if self._ignore(f):
836 833 return True
837 834 for p in pathutil.finddirs(f):
838 835 if self._ignore(p):
839 836 return True
840 837 return False
841 838
842 839 def _ignorefiles(self):
843 840 files = []
844 841 if os.path.exists(self._join(b'.hgignore')):
845 842 files.append(self._join(b'.hgignore'))
846 843 for name, path in self._ui.configitems(b"ui"):
847 844 if name == b'ignore' or name.startswith(b'ignore.'):
848 845 # we need to use os.path.join here rather than self._join
849 846 # because path is arbitrary and user-specified
850 847 files.append(os.path.join(self._rootdir, util.expandpath(path)))
851 848 return files
852 849
853 850 def _ignorefileandline(self, f):
854 851 files = collections.deque(self._ignorefiles())
855 852 visited = set()
856 853 while files:
857 854 i = files.popleft()
858 855 patterns = matchmod.readpatternfile(
859 856 i, self._ui.warn, sourceinfo=True
860 857 )
861 858 for pattern, lineno, line in patterns:
862 859 kind, p = matchmod._patsplit(pattern, b'glob')
863 860 if kind == b"subinclude":
864 861 if p not in visited:
865 862 files.append(p)
866 863 continue
867 864 m = matchmod.match(
868 865 self._root, b'', [], [pattern], warn=self._ui.warn
869 866 )
870 867 if m(f):
871 868 return (i, lineno, line)
872 869 visited.add(i)
873 870 return (None, -1, b"")
874 871
875 872 def _walkexplicit(self, match, subrepos):
876 873 """Get stat data about the files explicitly specified by match.
877 874
878 875 Return a triple (results, dirsfound, dirsnotfound).
879 876 - results is a mapping from filename to stat result. It also contains
880 877 listings mapping subrepos and .hg to None.
881 878 - dirsfound is a list of files found to be directories.
882 879 - dirsnotfound is a list of files that the dirstate thinks are
883 880 directories and that were not found."""
884 881
885 882 def badtype(mode):
886 883 kind = _(b'unknown')
887 884 if stat.S_ISCHR(mode):
888 885 kind = _(b'character device')
889 886 elif stat.S_ISBLK(mode):
890 887 kind = _(b'block device')
891 888 elif stat.S_ISFIFO(mode):
892 889 kind = _(b'fifo')
893 890 elif stat.S_ISSOCK(mode):
894 891 kind = _(b'socket')
895 892 elif stat.S_ISDIR(mode):
896 893 kind = _(b'directory')
897 894 return _(b'unsupported file type (type is %s)') % kind
898 895
899 896 badfn = match.bad
900 897 dmap = self._map
901 898 lstat = os.lstat
902 899 getkind = stat.S_IFMT
903 900 dirkind = stat.S_IFDIR
904 901 regkind = stat.S_IFREG
905 902 lnkkind = stat.S_IFLNK
906 903 join = self._join
907 904 dirsfound = []
908 905 foundadd = dirsfound.append
909 906 dirsnotfound = []
910 907 notfoundadd = dirsnotfound.append
911 908
912 909 if not match.isexact() and self._checkcase:
913 910 normalize = self._normalize
914 911 else:
915 912 normalize = None
916 913
917 914 files = sorted(match.files())
918 915 subrepos.sort()
919 916 i, j = 0, 0
920 917 while i < len(files) and j < len(subrepos):
921 918 subpath = subrepos[j] + b"/"
922 919 if files[i] < subpath:
923 920 i += 1
924 921 continue
925 922 while i < len(files) and files[i].startswith(subpath):
926 923 del files[i]
927 924 j += 1
928 925
929 926 if not files or b'' in files:
930 927 files = [b'']
931 928 # constructing the foldmap is expensive, so don't do it for the
932 929 # common case where files is ['']
933 930 normalize = None
934 931 results = dict.fromkeys(subrepos)
935 932 results[b'.hg'] = None
936 933
937 934 for ff in files:
938 935 if normalize:
939 936 nf = normalize(ff, False, True)
940 937 else:
941 938 nf = ff
942 939 if nf in results:
943 940 continue
944 941
945 942 try:
946 943 st = lstat(join(nf))
947 944 kind = getkind(st.st_mode)
948 945 if kind == dirkind:
949 946 if nf in dmap:
950 947 # file replaced by dir on disk but still in dirstate
951 948 results[nf] = None
952 949 foundadd((nf, ff))
953 950 elif kind == regkind or kind == lnkkind:
954 951 results[nf] = st
955 952 else:
956 953 badfn(ff, badtype(kind))
957 954 if nf in dmap:
958 955 results[nf] = None
959 956 except OSError as inst: # nf not found on disk - it is dirstate only
960 957 if nf in dmap: # does it exactly match a missing file?
961 958 results[nf] = None
962 959 else: # does it match a missing directory?
963 960 if self._map.hasdir(nf):
964 961 notfoundadd(nf)
965 962 else:
966 963 badfn(ff, encoding.strtolocal(inst.strerror))
967 964
968 965 # match.files() may contain explicitly-specified paths that shouldn't
969 966 # be taken; drop them from the list of files found. dirsfound/notfound
970 967 # aren't filtered here because they will be tested later.
971 968 if match.anypats():
972 969 for f in list(results):
973 970 if f == b'.hg' or f in subrepos:
974 971 # keep sentinel to disable further out-of-repo walks
975 972 continue
976 973 if not match(f):
977 974 del results[f]
978 975
979 976 # Case insensitive filesystems cannot rely on lstat() failing to detect
980 977 # a case-only rename. Prune the stat object for any file that does not
981 978 # match the case in the filesystem, if there are multiple files that
982 979 # normalize to the same path.
983 980 if match.isexact() and self._checkcase:
984 981 normed = {}
985 982
986 983 for f, st in pycompat.iteritems(results):
987 984 if st is None:
988 985 continue
989 986
990 987 nc = util.normcase(f)
991 988 paths = normed.get(nc)
992 989
993 990 if paths is None:
994 991 paths = set()
995 992 normed[nc] = paths
996 993
997 994 paths.add(f)
998 995
999 996 for norm, paths in pycompat.iteritems(normed):
1000 997 if len(paths) > 1:
1001 998 for path in paths:
1002 999 folded = self._discoverpath(
1003 1000 path, norm, True, None, self._map.dirfoldmap
1004 1001 )
1005 1002 if path != folded:
1006 1003 results[path] = None
1007 1004
1008 1005 return results, dirsfound, dirsnotfound
1009 1006
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Select the ignore predicates up front: when listing ignored files
        # nothing is treated as ignored; when not listing unknown files
        # everything untracked can be treated as ignored (see else branch).
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Hoist frequently-used attributes/functions into locals; this loop
        # is hot and local lookups are much cheaper than attribute lookups.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        # On case-preserving/insensitive filesystems (self._checkcase) paths
        # read from disk must be folded before comparing against the dmap.
        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # Iterative depth-first walk: `work` is a stack of directories
            # still to list; matches are accumulated into `results`.
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # The sentinels inserted by _walkexplicit must not leak to callers.
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1197 1194
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status through the Rust extension.

        Returns the same ``(lookup, status)`` pair as :meth:`status`.
        May raise ``rustmod.FallbackError`` (caught by the caller, which
        then falls back to the pure-Python implementation).
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        # NOTE: the order of this 12-tuple is part of the Rust FFI contract;
        # keep it in sync with rustmod.status.
        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # Fold the Rust-reported dirty flag into ours so any map changes made
        # on the Rust side are eventually written back to disk.
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax) pair: invalid ignore-pattern syntax
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: the pattern file could not be read
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1276 1273
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # The ignored/clean/unknown parameters are request flags; rebind them
        # so the same names can be reused below for the result lists.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # Decide whether the Rust fast path can service this request.
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        # Sink used in place of list.append when a category isn't requested.
        def noop(f):
            pass

        # Bind hot methods to locals -- this loop runs once per walked file.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # File on disk but not in the dirstate: ignored or unknown.
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                # Tracked but gone from disk.
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                # Tracked and present: compare recorded size/mode/mtime
                # against the on-disk stat to classify the file.
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1417 1414
1418 1415 def matches(self, match):
1419 1416 """
1420 1417 return files in the dirstate (in whatever state) filtered by match
1421 1418 """
1422 1419 dmap = self._map
1423 1420 if rustmod is not None:
1424 1421 dmap = self._map._map
1425 1422
1426 1423 if match.always():
1427 1424 return dmap.keys()
1428 1425 files = match.files()
1429 1426 if match.isexact():
1430 1427 # fast path -- filter the other way around, since typically files is
1431 1428 # much smaller than dmap
1432 1429 return [f for f in files if f in dmap]
1433 1430 if match.prefix() and all(fn in dmap for fn in files):
1434 1431 # fast path -- all the values are known to be files, so just return
1435 1432 # that
1436 1433 return list(files)
1437 1434 return [f for f in dmap if match(f)]
1438 1435
1439 1436 def _actualfilename(self, tr):
1440 1437 if tr:
1441 1438 return self._pendingfilename
1442 1439 else:
1443 1440 return self._filename
1444 1441
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` is the active transaction (or None); it decides which file
        (pending or regular) currently holds the dirstate. ``backupname``
        must differ from that file.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1483 1480
1484 1481 def restorebackup(self, tr, backupname):
1485 1482 '''Restore dirstate by backup file'''
1486 1483 # this "invalidate()" prevents "wlock.release()" from writing
1487 1484 # changes of dirstate out after restoring from backup file
1488 1485 self.invalidate()
1489 1486 filename = self._actualfilename(tr)
1490 1487 o = self._opener
1491 1488 if util.samefile(o.join(backupname), o.join(filename)):
1492 1489 o.unlink(backupname)
1493 1490 else:
1494 1491 o.rename(backupname, filename, checkambig=True)
1495 1492
    def clearbackup(self, tr, backupname):
        '''Clear backup file

        ``tr`` is accepted for interface symmetry with savebackup() and
        restorebackup() but is not used here.
        '''
        self._opener.unlink(backupname)
1499 1496
1500 1497 def verify(self, m1, m2):
1501 1498 """check the dirstate content again the parent manifest and yield errors"""
1502 1499 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1503 1500 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1504 1501 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1505 1502 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1506 1503 for f, entry in self.items():
1507 1504 state = entry.state
1508 1505 if state in b"nr" and f not in m1:
1509 1506 yield (missing_from_p1, f, state)
1510 1507 if state in b"a" and f in m1:
1511 1508 yield (unexpected_in_p1, f, state)
1512 1509 if state in b"m" and f not in m1 and f not in m2:
1513 1510 yield (missing_from_ps, f, state)
1514 1511 for f in m1:
1515 1512 state = self.get_entry(f).state
1516 1513 if state not in b"nrm":
1517 1514 yield (missing_from_ds, f, state)
General Comments 0
You need to be logged in to leave comments. Login now