##// END OF EJS Templates
dirstate: deprecate the `normal` method in all cases...
marmoute -
r48519:1168e54b default
parent child Browse files
Show More
@@ -1,1686 +1,1703 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43 43
44 44 propertycache = util.propertycache
45 45 filecache = scmutil.filecache
46 46 _rangemask = dirstatemap.rangemask
47 47
48 48 DirstateItem = parsers.DirstateItem
49 49
50 50
class repocache(filecache):
    """filecache subclass keyed on files living under .hg/"""

    def join(self, obj, fname):
        """Resolve fname against obj's .hg opener root."""
        joined = obj._opener.join(fname)
        return joined
57 57
class rootcache(filecache):
    """filecache subclass keyed on files living in the repository root"""

    def join(self, obj, fname):
        """Resolve fname against the repository root."""
        joined = obj._join(fname)
        return joined
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
def requires_parents_change(func):
    """Decorator: func may only run inside a parentchange context."""

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
84 84
85 85
def requires_no_parents_change(func):
    """Decorator: func may only run outside a parentchange context."""

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95 95
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state differs from the on-disk file
        self._dirty = False
        # mtime of the most recently recorded clean file (see _normal)
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of nested parentchange() contexts (see pendingparentchange)
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        # parents as of the last write; used to fire parent-change callbacks
        self._origpl = None
        # files touched since the last write; used to drop ambiguous mtimes
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139 139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching self._pl forces the dirstate map (and thus the parents)
        # to be read from disk now rather than lazily later
        self._pl
146 146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        # the counter (rather than a flag) allows nested parentchange contexts
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163 163
164 164 def pendingparentchange(self):
165 165 """Returns true if the dirstate is in the middle of a set of changes
166 166 that modify the dirstate parent.
167 167 """
168 168 return self._parentwriters > 0
169 169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # NOTE(review): assigning self._map before returning mirrors what
        # propertycache does; presumably this guards re-entrant access while
        # the map is being constructed — confirm
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181 181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194 194
    @repocache(b'branch')
    def _branch(self):
        """Current branch name (bytes), read from .hg/branch.

        A missing or empty file means the default branch.
        """
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            # only a missing file is acceptable; re-raise anything else
            if inst.errno != errno.ENOENT:
                raise
            return b"default"
203 203
    @property
    def _pl(self):
        # (p1, p2) working-directory parents, straight from the dirstate map
        return self._map.parents()

    def hasdir(self, d):
        """True if d is a directory containing at least one tracked file."""
        return self._map.hastrackeddir(d)
210 210
211 211 @rootcache(b'.hgignore')
212 212 def _ignore(self):
213 213 files = self._ignorefiles()
214 214 if not files:
215 215 return matchmod.never()
216 216
217 217 pats = [b'include:%s' % f for f in files]
218 218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219 219
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' despite a non-'/' os sep
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # whether the filesystem under the repo root supports symlinks
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # whether the filesystem under the repo root honors the exec bit
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed against '.hg')
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240 240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a path to its flags: b'l', b'x' or b''.

        buildfallback() is only invoked when the filesystem cannot express
        symlinks and/or exec bits itself; it is deliberately not called in
        the fully-capable case below.
        """
        if self._checklink and self._checkexec:
            # filesystem supports both: answer from a single lstat

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks from disk, exec bit from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit from disk, symlinks from the fallback

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither supported: everything comes from the fallback
            return fallback
280 280
    @propertycache
    def _cwd(self):
        """Current working directory, overridable via ui.forcecwd."""
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
288 288
289 289 def getcwd(self):
290 290 """Return the path from which a canonical path is calculated.
291 291
292 292 This path should be used to resolve file patterns or to convert
293 293 canonical paths back to file paths for display. It shouldn't be
294 294 used to get real file paths. Use vfs functions instead.
295 295 """
296 296 cwd = self._cwd
297 297 if cwd == self._root:
298 298 return b''
299 299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 300 rootsep = self._root
301 301 if not util.endswithsep(rootsep):
302 302 rootsep += pycompat.ossep
303 303 if cwd.startswith(rootsep):
304 304 return cwd[len(rootsep) :]
305 305 else:
306 306 # we're outside the repo. return an absolute path.
307 307 return cwd
308 308
309 309 def pathto(self, f, cwd=None):
310 310 if cwd is None:
311 311 cwd = self.getcwd()
312 312 path = util.pathto(self._root, cwd, f)
313 313 if self._slash:
314 314 return util.pconvert(path)
315 315 return path
316 316
317 317 def __getitem__(self, key):
318 318 """Return the current state of key (a filename) in the dirstate.
319 319
320 320 States are:
321 321 n normal
322 322 m needs merging
323 323 r marked for removal
324 324 a marked for addition
325 325 ? not tracked
326 326
327 327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 328 consider migrating all user of this to going through the dirstate entry
329 329 instead.
330 330 """
331 331 entry = self._map.get(key)
332 332 if entry is not None:
333 333 return entry.state
334 334 return b'?'
335 335
    def __contains__(self, key):
        # membership == "has an entry in the dirstate map"
        return key in self._map

    def __iter__(self):
        # deterministic (sorted) iteration over tracked filenames
        return iter(sorted(self._map))

    def items(self):
        # (filename, entry) pairs, in map order
        return pycompat.iteritems(self._map)

    # py2-style alias kept for compatibility with existing callers
    iteritems = items

    def directories(self):
        # delegate to the map's directory listing
        return self._map.directories()
349 349
    def parents(self):
        """Working-directory parents, passed through the validate hook."""
        return [self._validate(p) for p in self._pl]

    def p1(self):
        """First working-directory parent."""
        return self._validate(self._pl[0])

    def p2(self):
        """Second working-directory parent (nullid outside of merges)."""
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        """Current branch name, converted to the local encoding."""
        return encoding.tolocal(self._branch)
366 366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        # leaving a merge state: fix up entries that referenced p2
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414 414
    def setbranch(self, branch):
        """Persist the new branch name to .hg/branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # discard the atomictemp file so the old content survives
            f.discard()
            raise
430 430
431 431 def invalidate(self):
432 432 """Causes the next access to reread the dirstate.
433 433
434 434 This is different from localrepo.invalidatedirstate() because it always
435 435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 436 check whether the dirstate has changed before rereading it."""
437 437
438 438 for a in ("_map", "_branch", "_ignore"):
439 439 if a in self.__dict__:
440 440 delattr(self, a)
441 441 self._lastnormaltime = 0
442 442 self._dirty = False
443 443 self._updatedfiles.clear()
444 444 self._parentwriters = 0
445 445 self._origpl = None
446 446
447 447 def copy(self, source, dest):
448 448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 449 if source == dest:
450 450 return
451 451 self._dirty = True
452 452 if source is not None:
453 453 self._map.copymap[dest] = source
454 454 self._updatedfiles.add(source)
455 455 self._updatedfiles.add(dest)
456 456 elif self._map.copymap.pop(dest, None):
457 457 self._updatedfiles.add(dest)
458 458
    def copied(self, file):
        """Return the copy source recorded for file, or None."""
        return self._map.copymap.get(file, None)

    def copies(self):
        """Return the full {dest: source} copy map."""
        return self._map.copymap
464 464
465 465 @requires_no_parents_change
466 466 def set_tracked(self, filename):
467 467 """a "public" method for generic code to mark a file as tracked
468 468
469 469 This function is to be called outside of "update/merge" case. For
470 470 example by a command like `hg add X`.
471 471
472 472 return True the file was previously untracked, False otherwise.
473 473 """
474 474 entry = self._map.get(filename)
475 475 if entry is None:
476 476 self._add(filename)
477 477 return True
478 478 elif not entry.tracked:
479 479 self.normallookup(filename)
480 480 return True
481 481 return False
482 482
483 483 @requires_no_parents_change
484 484 def set_untracked(self, filename):
485 485 """a "public" method for generic code to mark a file as untracked
486 486
487 487 This function is to be called outside of "update/merge" case. For
488 488 example by a command like `hg remove X`.
489 489
490 490 return True the file was previously tracked, False otherwise.
491 491 """
492 492 entry = self._map.get(filename)
493 493 if entry is None:
494 494 return False
495 495 elif entry.added:
496 496 self._drop(filename)
497 497 return True
498 498 else:
499 499 self._remove(filename)
500 500 return True
501 501
502 502 @requires_no_parents_change
503 503 def set_clean(self, filename, parentfiledata=None):
504 504 """record that the current state of the file on disk is known to be clean"""
505 505 self._dirty = True
506 506 self._updatedfiles.add(filename)
507 self.normal(filename, parentfiledata=parentfiledata)
507 self._normal(filename, parentfiledata=parentfiledata)
508 508
509 509 @requires_parents_change
510 510 def update_file_p1(
511 511 self,
512 512 filename,
513 513 p1_tracked,
514 514 ):
515 515 """Set a file as tracked in the parent (or not)
516 516
517 517 This is to be called when adjust the dirstate to a new parent after an history
518 518 rewriting operation.
519 519
520 520 It should not be called during a merge (p2 != nullid) and only within
521 521 a `with dirstate.parentchange():` context.
522 522 """
523 523 if self.in_merge:
524 524 msg = b'update_file_reference should not be called when merging'
525 525 raise error.ProgrammingError(msg)
526 526 entry = self._map.get(filename)
527 527 if entry is None:
528 528 wc_tracked = False
529 529 else:
530 530 wc_tracked = entry.tracked
531 531 possibly_dirty = False
532 532 if p1_tracked and wc_tracked:
533 533 # the underlying reference might have changed, we will have to
534 534 # check it.
535 535 possibly_dirty = True
536 536 elif not (p1_tracked or wc_tracked):
537 537 # the file is no longer relevant to anyone
538 538 self._drop(filename)
539 539 elif (not p1_tracked) and wc_tracked:
540 540 if entry is not None and entry.added:
541 541 return # avoid dropping copy information (maybe?)
542 542 elif p1_tracked and not wc_tracked:
543 543 pass
544 544 else:
545 545 assert False, 'unreachable'
546 546
547 547 # this mean we are doing call for file we do not really care about the
548 548 # data (eg: added or removed), however this should be a minor overhead
549 549 # compared to the overall update process calling this.
550 550 parentfiledata = None
551 551 if wc_tracked:
552 552 parentfiledata = self._get_filedata(filename)
553 553
554 554 self._updatedfiles.add(filename)
555 555 self._map.reset_state(
556 556 filename,
557 557 wc_tracked,
558 558 p1_tracked,
559 559 possibly_dirty=possibly_dirty,
560 560 parentfiledata=parentfiledata,
561 561 )
562 562 if (
563 563 parentfiledata is not None
564 564 and parentfiledata[2] > self._lastnormaltime
565 565 ):
566 566 # Remember the most recent modification timeslot for status(),
567 567 # to make sure we won't miss future size-preserving file content
568 568 # modifications that happen within the same timeslot.
569 569 self._lastnormaltime = parentfiledata[2]
570 570
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes to keep track
        of what is the file situation in regards to the working copy and its
        parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        self._updatedfiles.add(filename)

        # stat data is only relevant for entries clean relative to p1
        need_parent_file_data = (
            not (possibly_dirty or clean_p2 or merged)
            and wc_tracked
            and p1_tracked
        )

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        if need_parent_file_data:
            if parentfiledata is None:
                parentfiledata = self._get_filedata(filename)
            mtime = parentfiledata[2]

            if mtime > self._lastnormaltime:
                # Remember the most recent modification timeslot for
                # status(), to make sure we won't miss future
                # size-preserving file content modifications that happen
                # within the same timeslot.
                self._lastnormaltime = mtime

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_tracked=p2_tracked,
            merged=merged,
            clean_p1=clean_p1,
            clean_p2=clean_p2,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
646 646
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """Common helper recording f in the dirstate map.

        Runs the filename sanity checks (only for newly-visible files) before
        delegating to self._map.addfile with the same flags.
        """
        entry = self._map.get(f)
        # precedence note: `added or (entry is not None and entry.removed)`
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
686 686
    def _get_filedata(self, filename):
        """Return (mode, size, mtime) for filename, from an lstat call."""
        s = os.lstat(self._join(filename))
        mode = s.st_mode
        size = s.st_size
        # integer mtime, matching what the dirstate stores
        mtime = s[stat.ST_MTIME]
        return (mode, size, mtime)
694 694
    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode,
        size), as or close as possible from the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        # deprecated entry point: warn with context-appropriate guidance,
        # then forward to the private implementation
        if self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `normal` inside of update/merge context."
                b" Use `update_file` or `update_file_p1`",
                b'6.0',
                stacklevel=2,
            )
        else:
            util.nouideprecwarn(
                b"do not use `normal` outside of update/merge context."
                b" Use `set_tracked`",
                b'6.0',
                stacklevel=2,
            )
        self._normal(f, parentfiledata=parentfiledata)
720
    def _normal(self, f, parentfiledata=None):
        # internal, warning-free implementation of `normal`
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            (mode, size, mtime) = self._get_filedata(f)
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
718 735
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    source = self._map.copymap.get(f)
                    if entry.merged_removed:
                        self.merge(f)
                    elif entry.from_p2_removed:
                        self.otherparent(f)
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    return
        # record as tracked but force a future status re-check
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
742 759
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if not self.in_merge:
            msg = _(b"setting %r to other parent only allowed in merges") % f
            raise error.Abort(msg)
        entry = self._map.get(f)
        if entry is not None and entry.tracked:
            # merge-like
            self._addpath(f, merged=True)
        else:
            # add-like
            self._addpath(f, from_p2=True)
        self._map.copymap.pop(f, None)
756 773
    def add(self, f):
        '''Mark a file added.'''
        # deprecated entry point; warn outside update/merge contexts
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `add` outside of update/merge context."
                b" Use `set_tracked`",
                b'6.0',
                stacklevel=2,
            )
        self._add(f)

    def _add(self, filename):
        """internal function to mark a file as added"""
        self._addpath(filename, added=True)
        self._map.copymap.pop(filename, None)
772 789
773 790 def remove(self, f):
774 791 '''Mark a file removed'''
775 792 if self.pendingparentchange():
776 793 util.nouideprecwarn(
777 794 b"do not use `remove` insde of update/merge context."
778 795 b" Use `update_file` or `update_file_p1`",
779 796 b'6.0',
780 797 stacklevel=2,
781 798 )
782 799 else:
783 800 util.nouideprecwarn(
784 801 b"do not use `remove` outside of update/merge context."
785 802 b" Use `set_untracked`",
786 803 b'6.0',
787 804 stacklevel=2,
788 805 )
789 806 self._remove(f)
790 807
    def _remove(self, filename):
        """internal function to mark a file removed"""
        self._dirty = True
        self._updatedfiles.add(filename)
        self._map.removefile(filename, in_merge=self.in_merge)

    def merge(self, f):
        '''Mark a file merged.'''
        # outside a merge this degrades to a plain "normal, maybe dirty"
        if not self.in_merge:
            return self.normallookup(f)
        return self.otherparent(f)
802 819
    def drop(self, f):
        '''Drop a file from the dirstate'''
        # deprecated entry point; warn outside update/merge contexts
        if not self.pendingparentchange():
            util.nouideprecwarn(
                b"do not use `drop` outside of update/merge context."
                b" Use `set_untracked`",
                b'6.0',
                stacklevel=2,
            )
        self._drop(f)

    def _drop(self, filename):
        """internal function to drop a file from the dirstate"""
        # dropfile reports whether an entry actually existed
        if self._map.dropfile(filename):
            self._dirty = True
            self._updatedfiles.add(filename)
            self._map.copymap.pop(filename, None)
820 837
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Recover the on-disk case of path and cache it in storemap.

        `normed` is the normcase()d form of `path`; `exists`, when not None,
        skips the lexists() probe.
        """
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            # only cache results for paths that exist on disk
            storemap[normed] = folded

        return folded
846 863
847 864 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
848 865 normed = util.normcase(path)
849 866 folded = self._map.filefoldmap.get(normed, None)
850 867 if folded is None:
851 868 if isknown:
852 869 folded = path
853 870 else:
854 871 folded = self._discoverpath(
855 872 path, normed, ignoremissing, exists, self._map.filefoldmap
856 873 )
857 874 return folded
858 875
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        """Normalize the case of a file or directory path.

        Like _normalizefile, but also consults (and populates) the directory
        fold map.
        """
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded
874 891
    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        # on case-sensitive filesystems the path is already canonical
        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path
896 913
    def clear(self):
        """Drop every entry from the dirstate and mark it dirty."""
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True
902 919
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to describe `parent` for `allfiles`.

        When `changedfiles` is given, only those entries are refreshed or
        dropped; otherwise the whole dirstate is rebuilt.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # clear() resets _lastnormaltime; preserve it across the rebuild
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True
936 953
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity
944 961
    def write(self, tr):
        """Write the dirstate to disk, via the transaction tr if given."""
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )
            return

        # no transaction: write immediately and atomically
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
976 993
977 994 def addparentchangecallback(self, category, callback):
978 995 """add a callback to be called when the wd parents are changed
979 996
980 997 Callback will be called with the following arguments:
981 998 dirstate, (oldp1, oldp2), (newp1, newp2)
982 999
983 1000 Category is a unique identifier to allow overwriting an old callback
984 1001 with a newer callback.
985 1002 """
986 1003 self._plchangecallbacks[category] = callback
987 1004
    def _writedirstate(self, tr, st):
        """Serialize the dirstate map into the open file object ``st``.

        Before writing, fires any registered parent-change callbacks if
        the working directory parents moved since the dirstate was read.
        Honours the ``debug.dirstate.delaywrite`` knob: when set, sleeps
        until the next multiple of that many seconds so that no entry's
        mtime can be ambiguous with "now".  Clears the dirty flag.
        """
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = 0
        self._dirty = False
1021 1038
1022 1039 def _dirignore(self, f):
1023 1040 if self._ignore(f):
1024 1041 return True
1025 1042 for p in pathutil.finddirs(f):
1026 1043 if self._ignore(p):
1027 1044 return True
1028 1045 return False
1029 1046
1030 1047 def _ignorefiles(self):
1031 1048 files = []
1032 1049 if os.path.exists(self._join(b'.hgignore')):
1033 1050 files.append(self._join(b'.hgignore'))
1034 1051 for name, path in self._ui.configitems(b"ui"):
1035 1052 if name == b'ignore' or name.startswith(b'ignore.'):
1036 1053 # we need to use os.path.join here rather than self._join
1037 1054 # because path is arbitrary and user-specified
1038 1055 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1039 1056 return files
1040 1057
    def _ignorefileandline(self, f):
        """Find which ignore pattern matches ``f``.

        Walks every ignore file breadth-first, following ``subinclude``
        directives (each file visited at most once), and returns a
        ``(file, lineno, line)`` triple for the first pattern that
        matches ``f``, or ``(None, -1, b"")`` when none does.
        """
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    # queue the referenced pattern file unless already seen
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")
1062 1079
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # human-readable description, for match.bad(), of an on-disk
            # entry that is neither a regular file nor a symlink
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop files that live inside one of the requested subrepos; both
        # lists are sorted so a single merge-style pass suffices
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1197 1214
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # pick the ignore predicates according to which classes of files
        # the caller wants listed
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # hoist frequently used attributes to locals for the hot loops below
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # the subrepo/.hg sentinels were only needed during traversal
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1385 1402
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status through the Rust implementation.

        Returns the same ``(lookup, status)`` pair as ``status()``.
        Warnings from the Rust side are forwarded to ``self._ui.warn``
        and bad files to ``matcher.bad``.  May raise
        ``rustmod.FallbackError`` (handled by the caller).
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust walk may itself have mutated the dirstate map
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1464 1481
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

        unsure:
          files that might have been modified since the dirstate was
          written, but need to be read to be sure (size is the same
          but mtime differs)
        status.modified:
          files that have definitely been modified since the dirstate
          was written (different size or mode)
        status.clean:
          files that have definitely not been modified since the
          dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # prefer the Rust fast path unless this configuration is one it
        # does not support yet
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        # bound methods as locals: these are called once per walked file
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1605 1622
1606 1623 def matches(self, match):
1607 1624 """
1608 1625 return files in the dirstate (in whatever state) filtered by match
1609 1626 """
1610 1627 dmap = self._map
1611 1628 if rustmod is not None:
1612 1629 dmap = self._map._rustmap
1613 1630
1614 1631 if match.always():
1615 1632 return dmap.keys()
1616 1633 files = match.files()
1617 1634 if match.isexact():
1618 1635 # fast path -- filter the other way around, since typically files is
1619 1636 # much smaller than dmap
1620 1637 return [f for f in files if f in dmap]
1621 1638 if match.prefix() and all(fn in dmap for fn in files):
1622 1639 # fast path -- all the values are known to be files, so just return
1623 1640 # that
1624 1641 return list(files)
1625 1642 return [f for f in dmap if match(f)]
1626 1643
1627 1644 def _actualfilename(self, tr):
1628 1645 if tr:
1629 1646 return self._pendingfilename
1630 1647 else:
1631 1648 return self._filename
1632 1649
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        The current dirstate content is written out first (if dirty or
        missing), then hardlink-copied to ``backupname``.  When a
        transaction ``tr`` is active, the pending file is used and is
        registered for cleanup on transaction failure.
        '''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1671 1688
    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file

        Moves ``backupname`` back over the active dirstate file (the
        pending file when a transaction ``tr`` is running).
        '''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            # backup is a hardlink to the current file: removing the
            # backup name is enough
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)
1683 1700
1684 1701 def clearbackup(self, tr, backupname):
1685 1702 '''Clear backup file'''
1686 1703 self._opener.unlink(backupname)
General Comments 0
You need to be logged in to leave comments. Login now