##// END OF EJS Templates
dirstate: drop the `_normal` method...
marmoute -
r48728:1b3c753b default
parent child Browse files
Show More
@@ -1,1637 +1,1634 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
import collections
import contextlib
import errno
import functools
import os
import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
# C/Rust accelerated implementations, when available
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# dirstate-v2 is only implemented by the Rust extensions
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# local aliases for frequently used helpers
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem
49 49
50 50
class repocache(filecache):
    """A filecache specialization for files living under ``.hg/``."""

    def join(self, obj, fname):
        # resolve the cached file relative to the repository's .hg opener
        joined = obj._opener.join(fname)
        return joined
57 57
class rootcache(filecache):
    """A filecache specialization for files in the repository root."""

    def join(self, obj, fname):
        # resolve the cached file relative to the working directory root
        joined = obj._join(fname)
        return joined
64 64
def _getfsnow(vfs):
    """Return the filesystem's notion of "now" as an mtime.

    A temporary file is created through ``vfs`` and its stat mtime is
    returned, so the value reflects the clock of the filesystem backing
    the repository rather than the local system clock.
    """
    fd, name = vfs.mkstemp()
    try:
        return os.fstat(fd)[stat.ST_MTIME]
    finally:
        os.close(fd)
        vfs.unlink(name)
73 73
74 74
def requires_parents_change(func):
    """Decorator enforcing that ``func`` runs inside a parentchange context.

    Raises error.ProgrammingError when the dirstate is not currently in
    the middle of a parent change (see ``dirstate.parentchange``).
    """

    # functools.wraps preserves the wrapped function's metadata
    # (__name__, __doc__), which matters for introspection and debugging.
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
84 84
85 85
def requires_no_parents_change(func):
    """Decorator enforcing that ``func`` runs outside a parentchange context.

    Raises error.ProgrammingError when the dirstate is currently in the
    middle of a parent change (see ``dirstate.parentchange``).
    """

    # functools.wraps preserves the wrapped function's metadata
    # (__name__, __doc__), which matters for introspection and debugging.
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
95 95
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
def __init__(
    self,
    opener,
    ui,
    root,
    validate,
    sparsematchfn,
    nodeconstants,
    use_dirstate_v2,
):
    """Create a new dirstate object.

    opener is an open()-like callable that can be used to open the
    dirstate file; root is the root of the directory tracked by
    the dirstate.
    """
    self._use_dirstate_v2 = use_dirstate_v2
    self._nodeconstants = nodeconstants
    self._opener = opener
    self._validate = validate
    self._root = root
    self._sparsematchfn = sparsematchfn
    # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
    # UNC path pointing to root share (issue4557)
    self._rootdir = pathutil.normasprefix(root)
    # True when in-memory state differs from what is on disk
    self._dirty = False
    # mtime of the most recently "normal"-marked file (see _lastnormaltime
    # updates below); used to avoid races in status()
    self._lastnormaltime = 0
    self._ui = ui
    self._filecache = {}
    # depth of currently open parentchange() contexts
    self._parentwriters = 0
    self._filename = b'dirstate'
    self._pendingfilename = b'%s.pending' % self._filename
    # category -> callback to invoke when the working dir parents change
    self._plchangecallbacks = {}
    # parents as of the last write, kept for change notification
    self._origpl = None
    # files touched since the last write (used to clear ambiguous times)
    self._updatedfiles = set()
    self._mapcls = dirstatemap.dirstatemap
    # Access and cache cwd early, so we don't access it for the first time
    # after a working-copy update caused it to not exist (accessing it then
    # raises an exception).
    self._cwd
139 139
def prefetch_parents(self):
    """make sure the parents are loaded

    Used to avoid a race condition.
    """
    # merely reading the property is enough to populate the parents
    parents = self._pl
    del parents
146 146
@contextlib.contextmanager
def parentchange(self):
    """Context manager for handling dirstate parents.

    If an exception occurs in the scope of the context manager,
    the incoherent dirstate won't be written when wlock is
    released.

    Contexts may be nested; ``_parentwriters`` counts how many are open.
    """
    self._parentwriters += 1
    yield
    # Typically we want the "undo" step of a context manager in a
    # finally block so it happens even when an exception
    # occurs. In this case, however, we only want to decrement
    # parentwriters if the code in the with statement exits
    # normally, so we don't have a try/finally here on purpose.
    self._parentwriters -= 1
163 163
def pendingparentchange(self):
    """Returns true if the dirstate is in the middle of a set of changes
    that modify the dirstate parent.
    """
    open_contexts = self._parentwriters
    return open_contexts > 0
169 169
@propertycache
def _map(self):
    """Return the dirstate contents (see documentation for dirstatemap)."""
    # NOTE(review): the map is assigned to the instance before returning —
    # presumably so accesses happening while the map is being built reuse
    # the same object rather than recursing; confirm before changing.
    self._map = self._mapcls(
        self._ui,
        self._opener,
        self._root,
        self._nodeconstants,
        self._use_dirstate_v2,
    )
    return self._map
181 181
@property
def _sparsematcher(self):
    """The matcher for the sparse checkout.

    The working directory may not include every file from a manifest. The
    matcher obtained by this property will match a path if it is to be
    included in the working directory.
    """
    # TODO there is potential to cache this property. For now, the matcher
    # is resolved on every access. (But the called function does use a
    # cache to keep the lookup fast.)
    make_matcher = self._sparsematchfn
    return make_matcher()
194 194
@repocache(b'branch')
def _branch(self):
    """Name of the current branch, read from ``.hg/branch``.

    Falls back to ``b"default"`` when the file is absent or empty.
    """
    try:
        data = self._opener.read(b"branch")
    except IOError as err:
        # a missing branch file simply means the default branch
        if err.errno != errno.ENOENT:
            raise
        return b"default"
    return data.strip() or b"default"
203 203
@property
def _pl(self):
    """The working directory's parent nodeids, read from the map."""
    current_parents = self._map.parents()
    return current_parents
207 207
def hasdir(self, d):
    """Return whether ``d`` is a directory containing tracked files."""
    return self._map.hastrackeddir(d)
210 210
@rootcache(b'.hgignore')
def _ignore(self):
    """Build the matcher implementing all configured ignore files."""
    ignore_files = self._ignorefiles()
    if not ignore_files:
        # nothing configured: match nothing
        return matchmod.never()

    patterns = [b'include:%s' % path for path in ignore_files]
    return matchmod.match(self._root, b'', [], patterns, warn=self._ui.warn)
219 219
@propertycache
def _slash(self):
    """True when paths should be shown with '/' despite a different os sep."""
    wants_slash = self._ui.configbool(b'ui', b'slash')
    return wants_slash and pycompat.ossep != b'/'
223 223
@propertycache
def _checklink(self):
    """Whether the filesystem under the repo root supports symlinks."""
    link_support = util.checklink(self._root)
    return link_support
227 227
@propertycache
def _checkexec(self):
    """Whether the filesystem under the repo root honors the exec bit."""
    exec_support = util.checkexec(self._root)
    return bool(exec_support)
231 231
@propertycache
def _checkcase(self):
    """True on case-insensitive filesystems (probed via the .hg directory)."""
    case_sensitive = util.fscasesensitive(self._join(b'.hg'))
    return not case_sensitive
235 235
def _join(self, f):
    """Prefix ``f`` with the repository root directory.

    Plain concatenation is much faster than os.path.join() and is safe
    because ``f`` is always a relative path.
    """
    return self._rootdir + f
240 240
def flagfunc(self, buildfallback):
    """Return a callable mapping a path to its b'l'/b'x'/b'' flags.

    ``buildfallback`` is only invoked when the filesystem cannot answer
    for symlinks and/or exec bits itself; in that case the returned
    function mixes filesystem checks with fallback data.
    """
    if self._checklink and self._checkexec:
        # the filesystem can answer both questions directly

        def f(x):
            try:
                st = os.lstat(self._join(x))
                if util.statislink(st):
                    return b'l'
                if util.statisexec(st):
                    return b'x'
            except OSError:
                # e.g. the file vanished: report no flags
                pass
            return b''

        return f

    fallback = buildfallback()
    if self._checklink:
        # symlinks are reliable here, the exec bit comes from the fallback

        def f(x):
            if os.path.islink(self._join(x)):
                return b'l'
            if b'x' in fallback(x):
                return b'x'
            return b''

        return f
    if self._checkexec:
        # exec bit is reliable here, symlinks come from the fallback

        def f(x):
            if b'l' in fallback(x):
                return b'l'
            if util.isexec(self._join(x)):
                return b'x'
            return b''

        return f
    else:
        # neither is reliable: rely entirely on the fallback
        return fallback
280 280
@propertycache
def _cwd(self):
    """Current working directory, honoring the ui.forcecwd override."""
    # internal config: ui.forcecwd
    forced = self._ui.config(b'ui', b'forcecwd')
    if forced:
        return forced
    return encoding.getcwd()
288 288
def getcwd(self):
    """Return the path from which a canonical path is calculated.

    This path should be used to resolve file patterns or to convert
    canonical paths back to file paths for display. It shouldn't be
    used to get real file paths. Use vfs functions instead.
    """
    cwd = self._cwd
    if cwd == self._root:
        return b''
    # self._root ends with a path separator if self._root is '/' or 'C:\'
    root_prefix = self._root
    if not util.endswithsep(root_prefix):
        root_prefix += pycompat.ossep
    if not cwd.startswith(root_prefix):
        # we're outside the repo. return an absolute path.
        return cwd
    return cwd[len(root_prefix) :]
308 308
def pathto(self, f, cwd=None):
    """Return ``f`` expressed relative to ``cwd`` for display purposes."""
    if cwd is None:
        cwd = self.getcwd()
    rel = util.pathto(self._root, cwd, f)
    # honor ui.slash on platforms with a different separator
    return util.pconvert(rel) if self._slash else rel
316 316
def __getitem__(self, key):
    """Return the current state of key (a filename) in the dirstate.

    States are:
      n  normal
      m  needs merging
      r  marked for removal
      a  marked for addition
      ?  not tracked

    XXX The "state" is a bit obscure to be in the "public" API. we should
    consider migrating all user of this to going through the dirstate entry
    instead.
    """
    entry = self._map.get(key)
    return b'?' if entry is None else entry.state
335 335
def __contains__(self, key):
    """Return whether ``key`` has an entry in the dirstate map."""
    return key in self._map
338 338
def __iter__(self):
    """Iterate over tracked filenames in sorted order."""
    filenames = sorted(self._map)
    return iter(filenames)
341 341
def items(self):
    """Iterate over the dirstate map's (filename, entry) pairs."""
    return pycompat.iteritems(self._map)

# historical alias kept for callers using the Python 2 era spelling
iteritems = items
346 346
def directories(self):
    """Expose the directory entries recorded in the dirstate map."""
    return self._map.directories()
349 349
def parents(self):
    """Return both working directory parents, validated."""
    return [self._validate(node) for node in self._pl]
352 352
def p1(self):
    """Return the validated first working directory parent."""
    first, _second = self._pl
    return self._validate(first)
355 355
def p2(self):
    """Return the validated second working directory parent."""
    _first, second = self._pl
    return self._validate(second)
358 358
@property
def in_merge(self):
    """True if a merge is in progress"""
    second_parent = self._pl[1]
    return second_parent != self._nodeconstants.nullid
363 363
def branch(self):
    """Return the current branch name in the local encoding."""
    return encoding.tolocal(self._branch)
366 366
def setparents(self, p1, p2=None):
    """Set dirstate parents to p1 and p2.

    When moving from two parents to one, "merged" entries are
    adjusted to normal and previous copy records discarded and
    returned by the call.

    See localrepo.setparents()
    """
    if p2 is None:
        p2 = self._nodeconstants.nullid
    if self._parentwriters == 0:
        raise ValueError(
            b"cannot set dirstate parent outside of "
            b"dirstate.parentchange context manager"
        )

    self._dirty = True
    oldp2 = self._pl[1]
    if self._origpl is None:
        # remember the pre-change parents for the write-time callbacks
        self._origpl = self._pl
    self._map.setparents(p1, p2)
    copies = {}
    if (
        oldp2 != self._nodeconstants.nullid
        and p2 == self._nodeconstants.nullid
    ):
        # leaving a merge state: only non-normal / other-parent entries
        # can need adjusting
        candidatefiles = self._map.non_normal_or_other_parent_paths()

        for f in candidatefiles:
            s = self._map.get(f)
            if s is None:
                continue

            # Discard "merged" markers when moving away from a merge state
            if s.merged:
                source = self._map.copymap.get(f)
                if source:
                    copies[f] = source
                self._normallookup(f)
            # Also fix up otherparent markers
            elif s.from_p2:
                source = self._map.copymap.get(f)
                if source:
                    copies[f] = source
                self._add(f)
    return copies
414 414
def setbranch(self, branch):
    """Persist ``branch`` (local encoding) as the current branch."""
    self.__class__._branch.set(self, encoding.fromlocal(branch))
    f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
    try:
        f.write(self._branch + b'\n')
        f.close()

        # make sure filecache has the correct stat info for _branch after
        # replacing the underlying file
        ce = self._filecache[b'_branch']
        if ce:
            ce.refresh()
    except: # re-raises
        # atomictemp file: discard the partial write before re-raising
        f.discard()
        raise
430 430
def invalidate(self):
    """Causes the next access to reread the dirstate.

    This is different from localrepo.invalidatedirstate() because it always
    rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
    check whether the dirstate has changed before rereading it."""

    # drop the cached properties so the next access rebuilds them
    for cached_attr in ("_map", "_branch", "_ignore"):
        if cached_attr in self.__dict__:
            delattr(self, cached_attr)
    self._lastnormaltime = 0
    self._dirty = False
    self._updatedfiles.clear()
    self._parentwriters = 0
    self._origpl = None
446 446
def copy(self, source, dest):
    """Mark dest as a copy of source. Unmark dest if source is None."""
    if source == dest:
        return
    self._dirty = True
    if source is None:
        # removing the record only dirties ``dest`` if one existed
        if self._map.copymap.pop(dest, None):
            self._updatedfiles.add(dest)
        return
    self._map.copymap[dest] = source
    self._updatedfiles.add(source)
    self._updatedfiles.add(dest)
458 458
def copied(self, file):
    """Return the recorded copy source of ``file``, or None."""
    return self._map.copymap.get(file, None)
461 461
def copies(self):
    """Return the full dest -> source copy map."""
    return self._map.copymap
464 464
@requires_no_parents_change
def set_tracked(self, filename):
    """a "public" method for generic code to mark a file as tracked

    This function is to be called outside of "update/merge" case. For
    example by a command like `hg add X`.

    return True the file was previously untracked, False otherwise.
    """
    entry = self._map.get(filename)
    if entry is None:
        # brand new to the dirstate
        self._add(filename)
        return True
    if not entry.tracked:
        # resurrect a previously removed file
        self._normallookup(filename)
        return True
    # XXX This is probably overkill for more case, but we need this to
    # fully replace the `normallookup` call with `set_tracked` one.
    # Consider smoothing this in the future.
    self.set_possibly_dirty(filename)
    return False
486 486
@requires_no_parents_change
def set_untracked(self, filename):
    """a "public" method for generic code to mark a file as untracked

    This function is to be called outside of "update/merge" case. For
    example by a command like `hg remove X`.

    return True the file was previously tracked, False otherwise.
    """
    entry = self._map.get(filename)
    if entry is None:
        # nothing to forget
        return False
    if entry.added:
        # an added-but-never-committed file is simply dropped
        self._drop(filename)
        return True
    self._dirty = True
    self._updatedfiles.add(filename)
    self._map.set_untracked(filename)
    return True
507 507
@requires_no_parents_change
def set_clean(self, filename, parentfiledata=None):
    """record that the current state of the file on disk is known to be clean"""
    self._dirty = True
    self._updatedfiles.add(filename)
    # use the caller-supplied stat data when available, stat otherwise
    if parentfiledata:
        mode, size, mtime = parentfiledata
    else:
        mode, size, mtime = self._get_filedata(filename)
    self._addpath(filename, mode=mode, size=size, mtime=mtime)
    self._map.copymap.pop(filename, None)
    if filename in self._map.nonnormalset:
        self._map.nonnormalset.remove(filename)
    if mtime > self._lastnormaltime:
        # Remember the most recent modification timeslot for status(),
        # to make sure we won't miss future size-preserving file content
        # modifications that happen within the same timeslot.
        self._lastnormaltime = mtime
514 526
@requires_no_parents_change
def set_possibly_dirty(self, filename):
    """record that the current state of the file on disk is unknown"""
    self._dirty = True
    self._updatedfiles.add(filename)
    # NOTE(review): presumably this forces status() to re-examine the
    # file's content — confirm against dirstatemap.set_possibly_dirty
    self._map.set_possibly_dirty(filename)
521 533
@requires_parents_change
def update_file_p1(
    self,
    filename,
    p1_tracked,
):
    """Set a file as tracked in the parent (or not)

    This is to be called when adjusting the dirstate to a new parent after
    an history rewriting operation.

    It should not be called during a merge (p2 != nullid) and only within
    a `with dirstate.parentchange():` context.
    """
    if self.in_merge:
        msg = b'update_file_reference should not be called when merging'
        raise error.ProgrammingError(msg)
    entry = self._map.get(filename)
    if entry is None:
        wc_tracked = False
    else:
        wc_tracked = entry.tracked
    possibly_dirty = False
    if p1_tracked and wc_tracked:
        # the underlying reference might have changed, we will have to
        # check it.
        possibly_dirty = True
    elif not (p1_tracked or wc_tracked):
        # the file is no longer relevant to anyone
        self._drop(filename)
    elif (not p1_tracked) and wc_tracked:
        if entry is not None and entry.added:
            return # avoid dropping copy information (maybe?)
    elif p1_tracked and not wc_tracked:
        pass
    else:
        assert False, 'unreachable'

    # this mean we are doing call for file we do not really care about the
    # data (eg: added or removed), however this should be a minor overhead
    # compared to the overall update process calling this.
    parentfiledata = None
    if wc_tracked:
        parentfiledata = self._get_filedata(filename)

    self._updatedfiles.add(filename)
    self._map.reset_state(
        filename,
        wc_tracked,
        p1_tracked,
        possibly_dirty=possibly_dirty,
        parentfiledata=parentfiledata,
    )
    if (
        parentfiledata is not None
        and parentfiledata[2] > self._lastnormaltime
    ):
        # Remember the most recent modification timeslot for status(),
        # to make sure we won't miss future size-preserving file content
        # modifications that happen within the same timeslot.
        self._lastnormaltime = parentfiledata[2]
583 595
@requires_parents_change
def update_file(
    self,
    filename,
    wc_tracked,
    p1_tracked,
    p2_tracked=False,
    merged=False,
    clean_p1=False,
    clean_p2=False,
    possibly_dirty=False,
    parentfiledata=None,
):
    """update the information about a file in the dirstate

    This is to be called when the dirstate's parent changes to keep track
    of what is the file situation in regards to the working copy and its parent.

    This function must be called within a `dirstate.parentchange` context.

    note: the API is at an early stage and we might need to adjust it
    depending of what information ends up being relevant and useful to
    other processing.
    """
    if merged and (clean_p1 or clean_p2):
        msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
        raise error.ProgrammingError(msg)

    # note: I do not think we need to double check name clash here since we
    # are in a update/merge case that should already have taken care of
    # this. The test agrees

    self._dirty = True
    self._updatedfiles.add(filename)

    # stat data is only needed when the file is clean relative to p1
    need_parent_file_data = (
        not (possibly_dirty or clean_p2 or merged)
        and wc_tracked
        and p1_tracked
    )

    # this mean we are doing call for file we do not really care about the
    # data (eg: added or removed), however this should be a minor overhead
    # compared to the overall update process calling this.
    if need_parent_file_data:
        if parentfiledata is None:
            parentfiledata = self._get_filedata(filename)
        mtime = parentfiledata[2]

        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for
            # status(), to make sure we won't miss future
            # size-preserving file content modifications that happen
            # within the same timeslot.
            self._lastnormaltime = mtime

    self._map.reset_state(
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=p2_tracked,
        merged=merged,
        clean_p1=clean_p1,
        clean_p2=clean_p2,
        possibly_dirty=possibly_dirty,
        parentfiledata=parentfiledata,
    )
    if (
        parentfiledata is not None
        and parentfiledata[2] > self._lastnormaltime
    ):
        # Remember the most recent modification timeslot for status(),
        # to make sure we won't miss future size-preserving file content
        # modifications that happen within the same timeslot.
        self._lastnormaltime = parentfiledata[2]
659 671
def _addpath(
    self,
    f,
    mode=0,
    size=None,
    mtime=None,
    added=False,
    merged=False,
    from_p2=False,
    possibly_dirty=False,
):
    """Low-level helper recording ``f`` in the map with the given flags.

    When the file is newly added (or resurrected from a removed entry),
    the filename is sanity-checked and verified not to clash with a
    tracked directory or shadow an existing tracked file.
    """
    entry = self._map.get(f)
    if added or entry is not None and entry.removed:
        scmutil.checkfilename(f)
        if self._map.hastrackeddir(f):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(f)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(f):
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                raise error.Abort(msg)
    self._dirty = True
    self._updatedfiles.add(f)
    self._map.addfile(
        f,
        mode=mode,
        size=size,
        mtime=mtime,
        added=added,
        merged=merged,
        from_p2=from_p2,
        possibly_dirty=possibly_dirty,
    )
699 711
def _get_filedata(self, filename):
    """Return the (mode, size, mtime) stat triple for ``filename``."""
    st = os.lstat(self._join(filename))
    return (st.st_mode, st.st_size, st[stat.ST_MTIME])
707 719
# NOTE(review): the lines below are shown as *removed* by this changeset;
# the body was inlined into ``set_clean`` above. Kept here verbatim.
def _normal(self, f, parentfiledata=None):
    if parentfiledata:
        (mode, size, mtime) = parentfiledata
    else:
        (mode, size, mtime) = self._get_filedata(f)
    self._addpath(f, mode=mode, size=size, mtime=mtime)
    self._map.copymap.pop(f, None)
    if f in self._map.nonnormalset:
        self._map.nonnormalset.remove(f)
    if mtime > self._lastnormaltime:
        # Remember the most recent modification timeslot for status(),
        # to make sure we won't miss future size-preserving file content
        # modifications that happen within the same timeslot.
        self._lastnormaltime = mtime
722
def _normallookup(self, f):
    '''Mark a file normal, but possibly dirty.'''
    if self.in_merge:
        # if there is a merge going on and the file was either
        # "merged" or coming from other parent (-2) before
        # being removed, restore that state.
        entry = self._map.get(f)
        if entry is not None:
            # XXX this should probably be dealt with at a lower level
            # (see `merged_removed` and `from_p2_removed`)
            if entry.merged_removed or entry.from_p2_removed:
                source = self._map.copymap.get(f)
                if entry.merged_removed:
                    self._otherparent(f)
                elif entry.from_p2_removed:
                    self._otherparent(f)
                if source is not None:
                    # also restore the copy record dropped by the above
                    self.copy(source, f)
                return
            elif entry.merged or entry.from_p2:
                return
    self._addpath(f, possibly_dirty=True)
    self._map.copymap.pop(f, None)
746 743
def _otherparent(self, f):
    """Mark ``f`` as coming from the second parent (merges only)."""
    if not self.in_merge:
        msg = _(b"setting %r to other parent only allowed in merges") % f
        raise error.Abort(msg)
    entry = self._map.get(f)
    if entry is None or not entry.tracked:
        # add-like
        self._addpath(f, from_p2=True)
    else:
        # merge-like
        self._addpath(f, merged=True)
    self._map.copymap.pop(f, None)
759 756
def _add(self, filename):
    """internal function to mark a file as added"""
    self._addpath(filename, added=True)
    # adding drops any stale copy record for the file
    self._map.copymap.pop(filename, None)
764 761
def _drop(self, filename):
    """internal function to drop a file from the dirstate"""
    if not self._map.dropfile(filename):
        # the map had no such entry: nothing changed
        return
    self._dirty = True
    self._updatedfiles.add(filename)
    self._map.copymap.pop(filename, None)
771 768
def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
    """Case-normalize ``path`` by consulting the filesystem.

    ``storemap`` caches the normed -> folded result for future lookups
    (it is one of the map's fold maps, supplied by the caller).
    """
    if exists is None:
        exists = os.path.lexists(os.path.join(self._root, path))
    if not exists:
        # Maybe a path component exists
        if not ignoremissing and b'/' in path:
            d, f = path.rsplit(b'/', 1)
            d = self._normalize(d, False, ignoremissing, None)
            folded = d + b"/" + f
        else:
            # No path components, preserve original case
            folded = path
    else:
        # recursively normalize leading directory components
        # against dirstate
        if b'/' in normed:
            d, f = normed.rsplit(b'/', 1)
            d = self._normalize(d, False, ignoremissing, True)
            r = self._root + b"/" + d
            folded = d + b"/" + util.fspath(f, r)
        else:
            folded = util.fspath(normed, self._root)
        storemap[normed] = folded

    return folded
797 794
def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
    """Normalize the case of ``path`` against files in the dirstate."""
    normed = util.normcase(path)
    cached = self._map.filefoldmap.get(normed, None)
    if cached is not None:
        return cached
    if isknown:
        # the name came from disk: it is already correctly cased
        return path
    return self._discoverpath(
        path, normed, ignoremissing, exists, self._map.filefoldmap
    )
809 806
def _normalize(self, path, isknown, ignoremissing=False, exists=None):
    """Normalize ``path`` against files, then directories, in the dirstate."""
    normed = util.normcase(path)
    cached = self._map.filefoldmap.get(normed, None)
    if cached is not None:
        return cached
    cached = self._map.dirfoldmap.get(normed, None)
    if cached is not None:
        return cached
    if isknown:
        # the name came from disk: it is already correctly cased
        return path
    # store discovered result in dirfoldmap so that future
    # normalizefile calls don't start matching directories
    return self._discoverpath(
        path, normed, ignoremissing, exists, self._map.dirfoldmap
    )
825 822
def normalize(self, path, isknown=False, ignoremissing=False):
    """
    normalize the case of a pathname when on a casefolding filesystem

    isknown specifies whether the filename came from walking the
    disk, to avoid extra filesystem access.

    If ignoremissing is True, missing path are returned
    unchanged. Otherwise, we try harder to normalize possibly
    existing path components.

    The normalized case is determined based on the following precedence:

    - version of name already stored in the dirstate
    - version of name stored on disk
    - version provided via command arguments
    """

    if not self._checkcase:
        # case-sensitive filesystem: nothing to fold
        return path
    return self._normalize(path, isknown, ignoremissing)
847 844
def clear(self):
    """Empty the dirstate and mark it dirty so the wipe gets written."""
    self._map.clear()
    self._updatedfiles.clear()
    self._lastnormaltime = 0
    self._dirty = True
853 850
def rebuild(self, parent, allfiles, changedfiles=None):
    """Reset the dirstate to single parent ``parent``.

    ``changedfiles`` limits the work to those files; when None, the
    whole dirstate is rebuilt from ``allfiles``.
    """
    if changedfiles is None:
        # Rebuild entire dirstate
        to_lookup = allfiles
        to_drop = []
        # clear() resets _lastnormaltime; preserve it across the wipe
        lastnormaltime = self._lastnormaltime
        self.clear()
        self._lastnormaltime = lastnormaltime
    elif len(changedfiles) < 10:
        # Avoid turning allfiles into a set, which can be expensive if it's
        # large.
        to_lookup = []
        to_drop = []
        for f in changedfiles:
            if f in allfiles:
                to_lookup.append(f)
            else:
                to_drop.append(f)
    else:
        changedfilesset = set(changedfiles)
        to_lookup = changedfilesset & set(allfiles)
        to_drop = changedfilesset - to_lookup

    if self._origpl is None:
        self._origpl = self._pl
    self._map.setparents(parent, self._nodeconstants.nullid)

    for f in to_lookup:
        self._normallookup(f)
    for f in to_drop:
        self._drop(f)

    self._dirty = True
887 884
def identity(self):
    """Return identity of dirstate itself to detect changing in storage

    If identity of previous dirstate is equal to this, writing
    changes based on the former dirstate out can keep consistency.
    """
    storage_identity = self._map.identity
    return storage_identity
895 892
def write(self, tr):
    """Write pending changes out, via transaction ``tr`` when given."""
    if not self._dirty:
        return

    filename = self._filename
    if tr:
        # 'dirstate.write()' is not only for writing in-memory
        # changes out, but also for dropping ambiguous timestamp.
        # delayed writing re-raise "ambiguous timestamp issue".
        # See also the wiki page below for detail:
        # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

        # emulate dropping timestamp in 'parsers.pack_dirstate'
        now = _getfsnow(self._opener)
        self._map.clearambiguoustimes(self._updatedfiles, now)

        # emulate that all 'dirstate.normal' results are written out
        self._lastnormaltime = 0
        self._updatedfiles.clear()

        # delay writing in-memory changes out
        tr.addfilegenerator(
            b'dirstate',
            (self._filename,),
            lambda f: self._writedirstate(tr, f),
            location=b'plain',
        )
        return

    # no transaction: write immediately
    st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
    self._writedirstate(tr, st)
927 924
def addparentchangecallback(self, category, callback):
    """add a callback to be called when the wd parents are changed

    Callback will be called with the following arguments:
        dirstate, (oldp1, oldp2), (newp1, newp2)

    Category is a unique identifier to allow overwriting an old callback
    with a newer callback.
    """
    # a later registration for the same category replaces the earlier one
    self._plchangecallbacks[category] = callback
938 935
def _writedirstate(self, tr, st):
    """Serialize the dirstate map into the open file object ``st``.

    Parent-change callbacks are notified first; an optional delay
    (debug.dirstate.delaywrite) may be inserted so recorded mtimes are
    not ambiguous relative to "now".
    """
    # notify callbacks about parents change
    if self._origpl is not None and self._origpl != self._pl:
        for c, callback in sorted(
            pycompat.iteritems(self._plchangecallbacks)
        ):
            callback(self, self._origpl, self._pl)
        self._origpl = None
    # use the modification time of the newly created temporary file as the
    # filesystem's notion of 'now'
    now = util.fstat(st)[stat.ST_MTIME] & _rangemask

    # enough 'delaywrite' prevents 'pack_dirstate' from dropping
    # timestamp of each entries in dirstate, because of 'now > mtime'
    delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
    if delaywrite > 0:
        # do we have any files to delay for?
        for f, e in pycompat.iteritems(self._map):
            if e.need_delay(now):
                import time # to avoid useless import

                # rather than sleep n seconds, sleep until the next
                # multiple of n seconds
                clock = time.time()
                start = int(clock) - (int(clock) % delaywrite)
                end = start + delaywrite
                time.sleep(end - clock)
                now = end # trust our estimate that the end is near now
                break

    self._map.write(tr, st, now)
    self._lastnormaltime = 0
    self._dirty = False
972 969
973 970 def _dirignore(self, f):
974 971 if self._ignore(f):
975 972 return True
976 973 for p in pathutil.finddirs(f):
977 974 if self._ignore(p):
978 975 return True
979 976 return False
980 977
981 978 def _ignorefiles(self):
982 979 files = []
983 980 if os.path.exists(self._join(b'.hgignore')):
984 981 files.append(self._join(b'.hgignore'))
985 982 for name, path in self._ui.configitems(b"ui"):
986 983 if name == b'ignore' or name.startswith(b'ignore.'):
987 984 # we need to use os.path.join here rather than self._join
988 985 # because path is arbitrary and user-specified
989 986 files.append(os.path.join(self._rootdir, util.expandpath(path)))
990 987 return files
991 988
992 989 def _ignorefileandline(self, f):
993 990 files = collections.deque(self._ignorefiles())
994 991 visited = set()
995 992 while files:
996 993 i = files.popleft()
997 994 patterns = matchmod.readpatternfile(
998 995 i, self._ui.warn, sourceinfo=True
999 996 )
1000 997 for pattern, lineno, line in patterns:
1001 998 kind, p = matchmod._patsplit(pattern, b'glob')
1002 999 if kind == b"subinclude":
1003 1000 if p not in visited:
1004 1001 files.append(p)
1005 1002 continue
1006 1003 m = matchmod.match(
1007 1004 self._root, b'', [], [pattern], warn=self._ui.warn
1008 1005 )
1009 1006 if m(f):
1010 1007 return (i, lineno, line)
1011 1008 visited.add(i)
1012 1009 return (None, -1, b"")
1013 1010
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # translate an unsupported st_mode into a human-readable message
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # bind frequently used attributes/functions to locals for the loop
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # drop explicit files that live inside one of the listed subrepos;
        # both lists are sorted, so this is a single merge-style pass
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # group result paths by their case-normalized form
            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            # within each collision group, drop the stat of every path whose
            # case does not match what is actually on disk
            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1148 1145
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # select the ignore predicates according to what the caller wants
        # listed (ignored files, unknown files, or neither)
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # bind hot lookups to locals for the traversal loop below
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # depth-first traversal using an explicit stack ('work')
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1336 1333
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute working-copy status using the Rust fast path.

        Returns the same ``(lookup, status)`` pair as ``status()``.  May
        raise ``rustmod.FallbackError`` (caught by the caller) when the
        Rust implementation cannot service the request.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # the Rust status run may itself have updated the dirstate map
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        # relay pattern-file warnings collected on the Rust side
        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1415 1412
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # NOTE: the parameter names are saved then rebound as result lists
        # below; 'listignored' etc. hold the caller's original flags.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        # decide whether the Rust fast path can handle this request
        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        # bind hot lookups/appends to locals for the classification loop
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1556 1553
1557 1554 def matches(self, match):
1558 1555 """
1559 1556 return files in the dirstate (in whatever state) filtered by match
1560 1557 """
1561 1558 dmap = self._map
1562 1559 if rustmod is not None:
1563 1560 dmap = self._map._rustmap
1564 1561
1565 1562 if match.always():
1566 1563 return dmap.keys()
1567 1564 files = match.files()
1568 1565 if match.isexact():
1569 1566 # fast path -- filter the other way around, since typically files is
1570 1567 # much smaller than dmap
1571 1568 return [f for f in files if f in dmap]
1572 1569 if match.prefix() and all(fn in dmap for fn in files):
1573 1570 # fast path -- all the values are known to be files, so just return
1574 1571 # that
1575 1572 return list(files)
1576 1573 return [f for f in dmap if match(f)]
1577 1574
1578 1575 def _actualfilename(self, tr):
1579 1576 if tr:
1580 1577 return self._pendingfilename
1581 1578 else:
1582 1579 return self._filename
1583 1580
    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file

        ``tr`` is the active transaction (or None); ``backupname`` must
        differ from the live dirstate filename.'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
1622 1619
1623 1620 def restorebackup(self, tr, backupname):
1624 1621 '''Restore dirstate by backup file'''
1625 1622 # this "invalidate()" prevents "wlock.release()" from writing
1626 1623 # changes of dirstate out after restoring from backup file
1627 1624 self.invalidate()
1628 1625 filename = self._actualfilename(tr)
1629 1626 o = self._opener
1630 1627 if util.samefile(o.join(backupname), o.join(filename)):
1631 1628 o.unlink(backupname)
1632 1629 else:
1633 1630 o.rename(backupname, filename, checkambig=True)
1634 1631
1635 1632 def clearbackup(self, tr, backupname):
1636 1633 '''Clear backup file'''
1637 1634 self._opener.unlink(backupname)
# NOTE(review): the trailing "General Comments" / login-prompt text was
# code-review web-page furniture, not part of dirstate.py; kept here only
# as a comment so the file remains valid Python.