##// END OF EJS Templates
dirstate: drop some duplicated code
Author: marmoute
Changeset: r48957:180e8fa3 (default branch)
parent child Browse files
Show More
@@ -1,1526 +1,1517 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43 43
44 44 propertycache = util.propertycache
45 45 filecache = scmutil.filecache
46 46 _rangemask = dirstatemap.rangemask
47 47
48 48 DirstateItem = dirstatemap.DirstateItem
49 49
50 50
class repocache(filecache):
    """filecache specialization for files living inside ``.hg/``."""

    def join(self, obj, fname):
        # resolve fname through the repository's .hg opener
        return obj._opener.join(fname)
56 56
57 57
class rootcache(filecache):
    """filecache specialization for files living in the repository root."""

    def join(self, obj, fname):
        # resolve fname relative to the working-directory root
        return obj._join(fname)
63 63
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
def requires_parents_change(func):
    """Decorator: ``func`` may only run inside a ``parentchange`` context.

    Raises ``error.ProgrammingError`` when the wrapped method is invoked
    while no parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
84 84
85 85
def requires_no_parents_change(func):
    """Decorator: ``func`` must NOT run inside a ``parentchange`` context.

    Raises ``error.ProgrammingError`` when the wrapped method is invoked
    while a parent change is pending.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95 95
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True while in-memory state differs from what is on disk
        self._dirty = False
        # most recent mtime observed on a clean file; consulted by status()
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of currently-open parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        # category -> callback invoked when the working dir parents change
        self._plchangecallbacks = {}
        # parents as of the last write; None when they have not changed
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
138 138
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching the property forces the dirstate map (and parents) to load
        self._pl
145 145
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
162 162
163 163 def pendingparentchange(self):
164 164 """Returns true if the dirstate is in the middle of a set of changes
165 165 that modify the dirstate parent.
166 166 """
167 167 return self._parentwriters > 0
168 168
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # The assignment shadows this propertycache on first access, so
        # subsequent lookups go straight to the instance attribute.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
180 180
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
193 193
    @repocache(b'branch')
    def _branch(self):
        """Current branch name (bytes) read from ``.hg/branch``.

        Falls back to ``default`` when the file is missing or empty.
        """
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            # only "file not found" is acceptable; re-raise everything else
            if inst.errno != errno.ENOENT:
                raise
            return b"default"
202 202
    @property
    def _pl(self):
        # working-directory parents, as recorded in the dirstate map
        return self._map.parents()
206 206
    def hasdir(self, d):
        """Return True if ``d`` is a directory containing tracked files."""
        return self._map.hastrackeddir(d)
209 209
210 210 @rootcache(b'.hgignore')
211 211 def _ignore(self):
212 212 files = self._ignorefiles()
213 213 if not files:
214 214 return matchmod.never()
215 215
216 216 pats = [b'include:%s' % f for f in files]
217 217 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 218
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' instead of the OS sep
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 222
    @propertycache
    def _checklink(self):
        # whether the working directory's filesystem supports symlinks
        return util.checklink(self._root)
226 226
    @propertycache
    def _checkexec(self):
        # whether the working directory's filesystem honours the exec bit
        return bool(util.checkexec(self._root))
230 230
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems: paths need case-normalizing
        return not util.fscasesensitive(self._join(b'.hg'))
234 234
    def _join(self, f):
        """Return the absolute path of repo-relative path ``f``."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
239 239
    def flagfunc(self, buildfallback):
        """Return a callable mapping a tracked path to its flags.

        The returned function yields b'l' for symlinks, b'x' for
        executable files and b'' otherwise.  When the filesystem cannot
        represent links and/or exec bits, ``buildfallback()`` provides a
        function supplying the missing flag information.
        """
        if self._checklink and self._checkexec:
            # fully capable filesystem: read both flags straight from disk

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks are real; the exec bit comes from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # the exec bit is real; symlink info comes from the fallback

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither flag is supported: rely entirely on the fallback
            return fallback
279 279
280 280 @propertycache
281 281 def _cwd(self):
282 282 # internal config: ui.forcecwd
283 283 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 284 if forcecwd:
285 285 return forcecwd
286 286 return encoding.getcwd()
287 287
288 288 def getcwd(self):
289 289 """Return the path from which a canonical path is calculated.
290 290
291 291 This path should be used to resolve file patterns or to convert
292 292 canonical paths back to file paths for display. It shouldn't be
293 293 used to get real file paths. Use vfs functions instead.
294 294 """
295 295 cwd = self._cwd
296 296 if cwd == self._root:
297 297 return b''
298 298 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 299 rootsep = self._root
300 300 if not util.endswithsep(rootsep):
301 301 rootsep += pycompat.ossep
302 302 if cwd.startswith(rootsep):
303 303 return cwd[len(rootsep) :]
304 304 else:
305 305 # we're outside the repo. return an absolute path.
306 306 return cwd
307 307
308 308 def pathto(self, f, cwd=None):
309 309 if cwd is None:
310 310 cwd = self.getcwd()
311 311 path = util.pathto(self._root, cwd, f)
312 312 if self._slash:
313 313 return util.pconvert(path)
314 314 return path
315 315
    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition
        ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. we should
        consider migrating all user of this to going through the dirstate entry
        instead.
        """
        # deprecated access path; steer callers toward get_entry()
        msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
        util.nouideprecwarn(msg, b'6.1', stacklevel=2)
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        return b'?'
336 336
337 337 def get_entry(self, path):
338 338 """return a DirstateItem for the associated path"""
339 339 entry = self._map.get(path)
340 340 if entry is None:
341 341 return DirstateItem()
342 342 return entry
343 343
    def __contains__(self, key):
        # membership means "the dirstate has an entry for this file"
        return key in self._map

    def __iter__(self):
        # iterate filenames in sorted order for deterministic output
        return iter(sorted(self._map))

    def items(self):
        """Iterate over (filename, DirstateItem) pairs."""
        return pycompat.iteritems(self._map)

    # historical alias kept for callers from the Python 2 era
    iteritems = items
354 354
    def parents(self):
        """Return the (validated) parents of the working directory."""
        return [self._validate(p) for p in self._pl]

    def p1(self):
        """Return the first (validated) working-directory parent."""
        return self._validate(self._pl[0])

    def p2(self):
        """Return the second (validated) working-directory parent."""
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        """Current branch name, converted to the local encoding."""
        return encoding.tolocal(self._branch)
371 371
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries a
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            # remember the pre-change parents for the change callbacks
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)
397 397
    def setbranch(self, branch):
        """Persist ``branch`` (given in local encoding) to ``.hg/branch``."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # discard the partially-written temp file, then propagate
            f.discard()
            raise
413 413
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so they are recomputed on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._parentwriters = 0
        self._origpl = None
428 428
429 429 def copy(self, source, dest):
430 430 """Mark dest as a copy of source. Unmark dest if source is None."""
431 431 if source == dest:
432 432 return
433 433 self._dirty = True
434 434 if source is not None:
435 435 self._map.copymap[dest] = source
436 436 else:
437 437 self._map.copymap.pop(dest, None)
438 438
439 439 def copied(self, file):
440 440 return self._map.copymap.get(file, None)
441 441
442 442 def copies(self):
443 443 return self._map.copymap
444 444
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            # newly-tracked path: make sure it does not clash with
            # an existing tracked file or directory
            self._check_new_tracked_filename(filename)
        return self._map.set_tracked(filename)
459 459
460 460 @requires_no_parents_change
461 461 def set_untracked(self, filename):
462 462 """a "public" method for generic code to mark a file as untracked
463 463
464 464 This function is to be called outside of "update/merge" case. For
465 465 example by a command like `hg remove X`.
466 466
467 467 return True the file was previously tracked, False otherwise.
468 468 """
469 469 ret = self._map.set_untracked(filename)
470 470 if ret:
471 471 self._dirty = True
472 472 return ret
473 473
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        if parentfiledata:
            # caller already stat()ed the file; reuse its data
            (mode, size, mtime) = parentfiledata
        else:
            (mode, size, mtime) = self._get_filedata(filename)
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        self._map.set_clean(filename, mode, size, mtime)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
490 490
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._map.set_possibly_dirty(filename)
496 496
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjust the dirstate to a new parent after an history
        rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        parentfiledata = None
        if wc_tracked and p1_tracked:
            # file is clean relative to p1: capture its on-disk stat data
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
549 549
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the direstates parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True

        need_parent_file_data = (
            not possibly_dirty and not p2_info and wc_tracked and p1_tracked
        )

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        # NOTE(review): with `or`, a stat is attempted even when the caller
        # did not request parent file data (parentfiledata is None) — confirm
        # this branch cannot be reached for a file absent from disk.
        if need_parent_file_data or parentfiledata is None:
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
613 604
    def _check_new_tracked_filename(self, filename):
        """Abort when tracking ``filename`` would create a path conflict."""
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            # a tracked directory already uses this exact name
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                # an ancestor path is a tracked *file*: clash
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
629 620
630 621 def _get_filedata(self, filename):
631 622 """returns"""
632 623 s = os.lstat(self._join(filename))
633 624 mode = s.st_mode
634 625 size = s.st_size
635 626 mtime = s[stat.ST_MTIME]
636 627 return (mode, size, mtime)
637 628
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Recover the canonical case of ``path`` and cache it in ``storemap``.

        ``normed`` is the case-normalized form of ``path``; the folded
        result is recorded under it before being returned.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded

        return folded
663 654
664 655 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
665 656 normed = util.normcase(path)
666 657 folded = self._map.filefoldmap.get(normed, None)
667 658 if folded is None:
668 659 if isknown:
669 660 folded = path
670 661 else:
671 662 folded = self._discoverpath(
672 663 path, normed, ignoremissing, exists, self._map.filefoldmap
673 664 )
674 665 return folded
675 666
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Normalize the case of ``path`` against files *and* directories."""
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
691 682
692 683 def normalize(self, path, isknown=False, ignoremissing=False):
693 684 """
694 685 normalize the case of a pathname when on a casefolding filesystem
695 686
696 687 isknown specifies whether the filename came from walking the
697 688 disk, to avoid extra filesystem access.
698 689
699 690 If ignoremissing is True, missing path are returned
700 691 unchanged. Otherwise, we try harder to normalize possibly
701 692 existing path components.
702 693
703 694 The normalized case is determined based on the following precedence:
704 695
705 696 - version of name already stored in the dirstate
706 697 - version of name stored on disk
707 698 - version provided via command arguments
708 699 """
709 700
710 701 if self._checkcase:
711 702 return self._normalize(path, isknown, ignoremissing)
712 703 return path
713 704
    def clear(self):
        """Drop every entry from the dirstate and mark it dirty."""
        self._map.clear()
        self._lastnormaltime = 0
        self._dirty = True
718 709
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to describe ``parent`` for ``allfiles``.

        When ``changedfiles`` is given, only those entries are refreshed
        (looked up when present in ``allfiles``, dropped otherwise).
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the wipe
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:

            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True
760 751
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
768 759
    def write(self, tr):
        """Write the dirstate to disk, or schedule it on transaction ``tr``."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # record when mtime start to be ambiguous
            now = _getfsnow(self._opener)

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f, now=now),
                location=b'plain',
            )
            return

        # no transaction: write synchronously
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
795 786
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
806 797
    def _writedirstate(self, tr, st, now=None):
        """Serialize the dirstate map to the already-open file ``st``.

        ``now`` is the filesystem timestamp used for ambiguous-mtime
        handling; when None it is derived from ``st`` itself.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None

        if now is None:
            # use the modification time of the newly created temporary file as the
            # filesystem's notion of 'now'
            now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = 0
        self._dirty = False
842 833
843 834 def _dirignore(self, f):
844 835 if self._ignore(f):
845 836 return True
846 837 for p in pathutil.finddirs(f):
847 838 if self._ignore(p):
848 839 return True
849 840 return False
850 841
851 842 def _ignorefiles(self):
852 843 files = []
853 844 if os.path.exists(self._join(b'.hgignore')):
854 845 files.append(self._join(b'.hgignore'))
855 846 for name, path in self._ui.configitems(b"ui"):
856 847 if name == b'ignore' or name.startswith(b'ignore.'):
857 848 # we need to use os.path.join here rather than self._join
858 849 # because path is arbitrary and user-specified
859 850 files.append(os.path.join(self._rootdir, util.expandpath(path)))
860 851 return files
861 852
    def _ignorefileandline(self, f):
        """Return (file, lineno, line) of the first ignore rule matching ``f``.

        Subincluded pattern files are followed breadth-first; returns
        ``(None, -1, b"")`` when no rule matches.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced file unless already processed
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
883 874
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description for an unsupported file type
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently-used lookups to locals for the walk loop
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # skip files that live inside a subrepo: they are walked separately
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename.  Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1018 1009
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        The walk proceeds in three steps: (1) stat the explicitly listed
        files/dirs via _walkexplicit, (2) recurse into the directories found
        in step 1, (3) sweep any dirstate entries not seen on disk yet.
        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Pick the ignore predicates: listing ignored files disables
        # ignoring entirely; listing neither unknown nor ignored lets us
        # ignore everything and skip dir recursion (step 2) below.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Hoist frequently-used attributes and module globals into locals;
        # this loop is one of the hottest paths in Mercurial.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # Iterative depth-first traversal; `work` is the stack of
            # directories still to list.
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    # never descend into the .hg metadata directory
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            # dirstate entry replaced by an unsupported file
                            # kind on disk; report it with a None stat
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # drop the sentinels installed by _walkexplicit
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1206 1197
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Run the status computation through the Rust extension.

        Returns a ``(lookup, scmutil.status)`` pair like ``status()``.
        May raise ``rustmod.FallbackError`` (handled by the caller) when the
        Rust path cannot service the request.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        # The order of this tuple is part of the FFI contract with
        # rustmod.status; do not reorder.
        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust side may have mutated the dirstate map (e.g. mtime fixups)
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        # NOTE(review): `self._ui.warn` is a bound method, so this condition
        # is always true; presumably kept as a guard against ui stubs.
        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file, syntax) pair: bad pattern inside an ignore file
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: the ignore file itself was unreadable
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1285 1276
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # Keep the caller's flags under list* names; the original parameter
        # names are rebound to result lists below.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # Decide whether the fast Rust implementation can handle this query.
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # fall through to the pure-Python implementation below
                pass

        def noop(f):
            pass

        # Hoist bound methods into locals: this loop runs once per file in
        # the working copy and is highly performance sensitive.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # file on disk but not tracked: either ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                # tracked but gone from disk
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                # tracked and present: compare recorded size/mode/mtime with
                # the on-disk stat to classify modified/lookup/clean
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1426 1417
1427 1418 def matches(self, match):
1428 1419 """
1429 1420 return files in the dirstate (in whatever state) filtered by match
1430 1421 """
1431 1422 dmap = self._map
1432 1423 if rustmod is not None:
1433 1424 dmap = self._map._map
1434 1425
1435 1426 if match.always():
1436 1427 return dmap.keys()
1437 1428 files = match.files()
1438 1429 if match.isexact():
1439 1430 # fast path -- filter the other way around, since typically files is
1440 1431 # much smaller than dmap
1441 1432 return [f for f in files if f in dmap]
1442 1433 if match.prefix() and all(fn in dmap for fn in files):
1443 1434 # fast path -- all the values are known to be files, so just return
1444 1435 # that
1445 1436 return list(files)
1446 1437 return [f for f in dmap if match(f)]
1447 1438
1448 1439 def _actualfilename(self, tr):
1449 1440 if tr:
1450 1441 return self._pendingfilename
1451 1442 else:
1452 1443 return self._filename
1453 1444
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` is the active transaction (or None); it determines whether
        the pending or the regular dirstate file is backed up.
        ``backupname`` is the name of the backup file to create inside .hg.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        # remove any stale backup before creating the new one
        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1492 1483
1493 1484 def restorebackup(self, tr, backupname):
1494 1485 '''Restore dirstate by backup file'''
1495 1486 # this "invalidate()" prevents "wlock.release()" from writing
1496 1487 # changes of dirstate out after restoring from backup file
1497 1488 self.invalidate()
1498 1489 filename = self._actualfilename(tr)
1499 1490 o = self._opener
1500 1491 if util.samefile(o.join(backupname), o.join(filename)):
1501 1492 o.unlink(backupname)
1502 1493 else:
1503 1494 o.rename(backupname, filename, checkambig=True)
1504 1495
1505 1496 def clearbackup(self, tr, backupname):
1506 1497 '''Clear backup file'''
1507 1498 self._opener.unlink(backupname)
1508 1499
1509 1500 def verify(self, m1, m2):
1510 1501 """check the dirstate content again the parent manifest and yield errors"""
1511 1502 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1512 1503 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1513 1504 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1514 1505 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1515 1506 for f, entry in self.items():
1516 1507 state = entry.state
1517 1508 if state in b"nr" and f not in m1:
1518 1509 yield (missing_from_p1, f, state)
1519 1510 if state in b"a" and f in m1:
1520 1511 yield (unexpected_in_p1, f, state)
1521 1512 if state in b"m" and f not in m1 and f not in m2:
1522 1513 yield (missing_from_ps, f, state)
1523 1514 for f in m1:
1524 1515 state = self.get_entry(f).state
1525 1516 if state not in b"nrm":
1526 1517 yield (missing_from_ds, f, state)
General Comments 0
You need to be logged in to leave comments. Login now