##// END OF EJS Templates
dirstate: fix restoration of "merged" state after a remove...
marmoute -
r48803:87b3010c default
parent child Browse files
Show More
@@ -1,1613 +1,1616
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import delattr
18 18
19 19 from hgdemandimport import tracing
20 20
21 21 from . import (
22 22 dirstatemap,
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 pathutil,
27 27 policy,
28 28 pycompat,
29 29 scmutil,
30 30 sparse,
31 31 util,
32 32 )
33 33
34 34 from .interfaces import (
35 35 dirstate as intdirstate,
36 36 util as interfaceutil,
37 37 )
38 38
39 39 parsers = policy.importmod('parsers')
40 40 rustmod = policy.importrust('dirstate')
41 41
42 42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43 43
44 44 propertycache = util.propertycache
45 45 filecache = scmutil.filecache
46 46 _rangemask = dirstatemap.rangemask
47 47
48 48 DirstateItem = parsers.DirstateItem
49 49
50 50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # Resolve `fname` through the repository opener, which is rooted
        # at the .hg directory (contrast with rootcache below).
        return obj._opener.join(fname)
56 56
57 57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # Resolve `fname` relative to the working-directory root
        # (contrast with repocache above, rooted at .hg).
        return obj._join(fname)
63 63
64 64
65 65 def _getfsnow(vfs):
66 66 '''Get "now" timestamp on filesystem'''
67 67 tmpfd, tmpname = vfs.mkstemp()
68 68 try:
69 69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 70 finally:
71 71 os.close(tmpfd)
72 72 vfs.unlink(tmpname)
73 73
74 74
def requires_parents_change(func):
    """Decorator enforcing that *func* runs inside a ``parentchange`` context.

    Calling the wrapped method while no parent change is pending raises a
    ProgrammingError that names the offending method.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
84 84
85 85
def requires_no_parents_change(func):
    """Decorator enforcing that *func* runs outside any ``parentchange`` context.

    Calling the wrapped method while a parent change is pending raises a
    ProgrammingError that names the offending method.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95 95
96 96
97 97 @interfaceutil.implementer(intdirstate.idirstate)
98 98 class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when in-memory state differs from what is on disk
        self._dirty = False
        # mtime of the most recently seen "normal" file (see set_clean)
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of currently-open parentchange() contexts
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        # per-category callbacks fired when the parents change (see
        # addparentchangecallback)
        self._plchangecallbacks = {}
        # parents as of the last write; reset by _writedirstate
        self._origpl = None
        # files touched since the last write, fed to clearambiguoustimes
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139 139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching self._pl forces the dirstate map (and thus the
        # parents) to be read now rather than lazily at a later point
        self._pl
146 146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.

        Nesting is allowed: a counter tracks how many contexts are open.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163 163
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        # non-zero while at least one parentchange() context is open
        return self._parentwriters > 0
169 169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # Assigning to self._map replaces the propertycache descriptor
        # lookup with a plain instance attribute for subsequent accesses.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181 181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.

        The matcher is produced by the `sparsematchfn` callable supplied at
        construction time.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194 194
195 195 @repocache(b'branch')
196 196 def _branch(self):
197 197 try:
198 198 return self._opener.read(b"branch").strip() or b"default"
199 199 except IOError as inst:
200 200 if inst.errno != errno.ENOENT:
201 201 raise
202 202 return b"default"
203 203
    @property
    def _pl(self):
        # raw parents of the working directory, straight from the map
        return self._map.parents()
207 207
    def hasdir(self, d):
        """Delegate to the dirstate map's tracked-directory lookup for `d`."""
        return self._map.hastrackeddir(d)
210 210
211 211 @rootcache(b'.hgignore')
212 212 def _ignore(self):
213 213 files = self._ignorefiles()
214 214 if not files:
215 215 return matchmod.never()
216 216
217 217 pats = [b'include:%s' % f for f in files]
218 218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219 219
220 220 @propertycache
221 221 def _slash(self):
222 222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223 223
    @propertycache
    def _checklink(self):
        # whether the filesystem at the repository root supports symlinks
        return util.checklink(self._root)
227 227
    @propertycache
    def _checkexec(self):
        # whether the filesystem at the repository root honours the exec bit
        return bool(util.checkexec(self._root))
231 231
    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems, where case normalization
        # of paths is required
        return not util.fscasesensitive(self._join(b'.hg'))
235 235
    def _join(self, f):
        """Return the absolute path of tracked file `f`."""
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240 240
    def flagfunc(self, buildfallback):
        """Return a callable mapping a filename to its b'l'/b'x'/b'' flags.

        When the filesystem cannot express symlinks or the exec bit, the
        callable built by `buildfallback()` supplies the missing flag.
        """
        if self._checklink and self._checkexec:
            # both flags are reliable on disk: a single lstat answers both

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks are reliable; exec bit comes from the fallback

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit is reliable; symlinks come from the fallback

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither flag is reliable on this filesystem
            return fallback
280 280
281 281 @propertycache
282 282 def _cwd(self):
283 283 # internal config: ui.forcecwd
284 284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 285 if forcecwd:
286 286 return forcecwd
287 287 return encoding.getcwd()
288 288
289 289 def getcwd(self):
290 290 """Return the path from which a canonical path is calculated.
291 291
292 292 This path should be used to resolve file patterns or to convert
293 293 canonical paths back to file paths for display. It shouldn't be
294 294 used to get real file paths. Use vfs functions instead.
295 295 """
296 296 cwd = self._cwd
297 297 if cwd == self._root:
298 298 return b''
299 299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 300 rootsep = self._root
301 301 if not util.endswithsep(rootsep):
302 302 rootsep += pycompat.ossep
303 303 if cwd.startswith(rootsep):
304 304 return cwd[len(rootsep) :]
305 305 else:
306 306 # we're outside the repo. return an absolute path.
307 307 return cwd
308 308
309 309 def pathto(self, f, cwd=None):
310 310 if cwd is None:
311 311 cwd = self.getcwd()
312 312 path = util.pathto(self._root, cwd, f)
313 313 if self._slash:
314 314 return util.pconvert(path)
315 315 return path
316 316
317 317 def __getitem__(self, key):
318 318 """Return the current state of key (a filename) in the dirstate.
319 319
320 320 States are:
321 321 n normal
322 322 m needs merging
323 323 r marked for removal
324 324 a marked for addition
325 325 ? not tracked
326 326
327 327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 328 consider migrating all user of this to going through the dirstate entry
329 329 instead.
330 330 """
331 331 entry = self._map.get(key)
332 332 if entry is not None:
333 333 return entry.state
334 334 return b'?'
335 335
    def __contains__(self, key):
        """True if `key` (a filename) has a dirstate entry."""
        return key in self._map
338 338
    def __iter__(self):
        # iterate over tracked filenames in sorted order
        return iter(sorted(self._map))
341 341
    def items(self):
        """Iterate over (filename, entry) pairs of the dirstate map."""
        return pycompat.iteritems(self._map)

    # Python 2 spelling kept as an alias for existing callers
    iteritems = items
346 346
    def directories(self):
        """Delegate to the dirstate map's directories() accessor."""
        return self._map.directories()
349 349
    def parents(self):
        # both working-directory parents, passed through the validate callable
        return [self._validate(p) for p in self._pl]
352 352
    def p1(self):
        """First (validated) parent of the working directory."""
        return self._validate(self._pl[0])
355 355
    def p2(self):
        """Second (validated) parent of the working directory."""
        return self._validate(self._pl[1])
358 358
    @property
    def in_merge(self):
        """True if a merge is in progress (i.e. the second parent is set)"""
        return self._pl[1] != self._nodeconstants.nullid
363 363
    def branch(self):
        # branch name converted to the local encoding for display
        return encoding.tolocal(self._branch)
366 366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        nullid = self._nodeconstants.nullid
        # dropping the second parent: merge-related entry states must be
        # downgraded and their copy records handed back to the caller
        if oldp2 != nullid and p2 == nullid:
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._check_new_tracked_filename(f)
                    self._updatedfiles.add(f)
                    self._map.reset_state(
                        f,
                        p1_tracked=False,
                        wc_tracked=True,
                    )
        return copies
418 418
    def setbranch(self, branch):
        """Set the current branch, persisting it to .hg/branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            # discard the atomictemp file so the old content survives
            f.discard()
            raise
434 434
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so they are recomputed on next access
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
450 450
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            # both ends are recorded as updated for later timestamp handling
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            self._updatedfiles.add(dest)
462 462
    def copied(self, file):
        """Return the recorded copy source of `file`, or None."""
        return self._map.copymap.get(file, None)
465 465
    def copies(self):
        """Return the full destination -> source copy mapping."""
        return self._map.copymap
468 468
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True if the file was previously untracked, False otherwise.
        """
        self._dirty = True
        self._updatedfiles.add(filename)
        entry = self._map.get(filename)
        if entry is None:
            # brand new file: guard against name clashes before adding
            self._check_new_tracked_filename(filename)
            self._map.addfile(filename, added=True)
            return True
        elif not entry.tracked:
            self._normallookup(filename)
            return True
        # XXX This is probably overkill for more case, but we need this to
        # fully replace the `normallookup` call with `set_tracked` one.
        # Consider smoothing this in the future.
        self.set_possibly_dirty(filename)
        return False
493 493
494 494 @requires_no_parents_change
495 495 def set_untracked(self, filename):
496 496 """a "public" method for generic code to mark a file as untracked
497 497
498 498 This function is to be called outside of "update/merge" case. For
499 499 example by a command like `hg remove X`.
500 500
501 501 return True the file was previously tracked, False otherwise.
502 502 """
503 503 ret = self._map.set_untracked(filename)
504 504 if ret:
505 505 self._dirty = True
506 506 self._updatedfiles.add(filename)
507 507 return ret
508 508
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        self._updatedfiles.add(filename)
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            # caller did not supply stat data: read it from disk
            (mode, size, mtime) = self._get_filedata(filename)
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        self._map.set_clean(filename, mode, size, mtime)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
526 526
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._updatedfiles.add(filename)
        # the per-entry bookkeeping is delegated to the dirstate map
        self._map.set_possibly_dirty(filename)
533 533
534 534 @requires_parents_change
535 535 def update_file_p1(
536 536 self,
537 537 filename,
538 538 p1_tracked,
539 539 ):
540 540 """Set a file as tracked in the parent (or not)
541 541
542 542 This is to be called when adjust the dirstate to a new parent after an history
543 543 rewriting operation.
544 544
545 545 It should not be called during a merge (p2 != nullid) and only within
546 546 a `with dirstate.parentchange():` context.
547 547 """
548 548 if self.in_merge:
549 549 msg = b'update_file_reference should not be called when merging'
550 550 raise error.ProgrammingError(msg)
551 551 entry = self._map.get(filename)
552 552 if entry is None:
553 553 wc_tracked = False
554 554 else:
555 555 wc_tracked = entry.tracked
556 556 possibly_dirty = False
557 557 if p1_tracked and wc_tracked:
558 558 # the underlying reference might have changed, we will have to
559 559 # check it.
560 560 possibly_dirty = True
561 561 elif not (p1_tracked or wc_tracked):
562 562 # the file is no longer relevant to anyone
563 563 if self._map.dropfile(filename):
564 564 self._dirty = True
565 565 self._updatedfiles.add(filename)
566 566 elif (not p1_tracked) and wc_tracked:
567 567 if entry is not None and entry.added:
568 568 return # avoid dropping copy information (maybe?)
569 569 elif p1_tracked and not wc_tracked:
570 570 pass
571 571 else:
572 572 assert False, 'unreachable'
573 573
574 574 # this mean we are doing call for file we do not really care about the
575 575 # data (eg: added or removed), however this should be a minor overhead
576 576 # compared to the overall update process calling this.
577 577 parentfiledata = None
578 578 if wc_tracked:
579 579 parentfiledata = self._get_filedata(filename)
580 580
581 581 self._updatedfiles.add(filename)
582 582 self._map.reset_state(
583 583 filename,
584 584 wc_tracked,
585 585 p1_tracked,
586 586 possibly_dirty=possibly_dirty,
587 587 parentfiledata=parentfiledata,
588 588 )
589 589 if (
590 590 parentfiledata is not None
591 591 and parentfiledata[2] > self._lastnormaltime
592 592 ):
593 593 # Remember the most recent modification timeslot for status(),
594 594 # to make sure we won't miss future size-preserving file content
595 595 # modifications that happen within the same timeslot.
596 596 self._lastnormaltime = parentfiledata[2]
597 597
    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes to keep track
        of what is the file situation in regards to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending of what information ends up being relevant and useful to
        other processing.
        """
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True
        self._updatedfiles.add(filename)

        # fresh stat data is only needed for a file clean in p1 and
        # tracked in the working copy
        need_parent_file_data = (
            not (possibly_dirty or clean_p2 or merged)
            and wc_tracked
            and p1_tracked
        )

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        if need_parent_file_data:
            if parentfiledata is None:
                parentfiledata = self._get_filedata(filename)
            mtime = parentfiledata[2]

            if mtime > self._lastnormaltime:
                # Remember the most recent modification timeslot for
                # status(), to make sure we won't miss future
                # size-preserving file content modifications that happen
                # within the same timeslot.
                self._lastnormaltime = mtime

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_tracked=p2_tracked,
            merged=merged,
            clean_p1=clean_p1,
            clean_p2=clean_p2,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
673 673
    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        """Add (or re-add) `f` to the dirstate map with the given entry flags."""
        entry = self._map.get(f)
        if added or entry is not None and not entry.tracked:
            # (re)starting to track the file: guard against name clashes
            self._check_new_tracked_filename(f)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )
700 700
    def _check_new_tracked_filename(self, filename):
        """Abort if `filename` cannot start being tracked (name clashes)."""
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            # a tracked ancestor directory means no file can shadow it below
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
716 716
717 717 def _get_filedata(self, filename):
718 718 """returns"""
719 719 s = os.lstat(self._join(filename))
720 720 mode = s.st_mode
721 721 size = s.st_size
722 722 mtime = s[stat.ST_MTIME]
723 723 return (mode, size, mtime)
724 724
    def _normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
                if entry.merged_removed or entry.from_p2_removed:
                    source = self._map.copymap.get(f)
                    # restore the exact pre-removal state: "merged" and
                    # "from p2" must not be conflated with each other
                    if entry.merged_removed:
                        self._addpath(f, merged=True)
                    else:
                        self._addpath(f, from_p2=True)
                    self._map.copymap.pop(f, None)
                    if source is not None:
                        self.copy(source, f)
                    return
                elif entry.merged or entry.from_p2:
                    return
        self._addpath(f, possibly_dirty=True)
        self._map.copymap.pop(f, None)
746 749
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        """Resolve the on-disk case of `path` and memoize it in `storemap`."""
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
        storemap[normed] = folded

        return folded
772 775
773 776 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
774 777 normed = util.normcase(path)
775 778 folded = self._map.filefoldmap.get(normed, None)
776 779 if folded is None:
777 780 if isknown:
778 781 folded = path
779 782 else:
780 783 folded = self._discoverpath(
781 784 path, normed, ignoremissing, exists, self._map.filefoldmap
782 785 )
783 786 return folded
784 787
785 788 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
786 789 normed = util.normcase(path)
787 790 folded = self._map.filefoldmap.get(normed, None)
788 791 if folded is None:
789 792 folded = self._map.dirfoldmap.get(normed, None)
790 793 if folded is None:
791 794 if isknown:
792 795 folded = path
793 796 else:
794 797 # store discovered result in dirfoldmap so that future
795 798 # normalizefile calls don't start matching directories
796 799 folded = self._discoverpath(
797 800 path, normed, ignoremissing, exists, self._map.dirfoldmap
798 801 )
799 802 return folded
800 803
801 804 def normalize(self, path, isknown=False, ignoremissing=False):
802 805 """
803 806 normalize the case of a pathname when on a casefolding filesystem
804 807
805 808 isknown specifies whether the filename came from walking the
806 809 disk, to avoid extra filesystem access.
807 810
808 811 If ignoremissing is True, missing path are returned
809 812 unchanged. Otherwise, we try harder to normalize possibly
810 813 existing path components.
811 814
812 815 The normalized case is determined based on the following precedence:
813 816
814 817 - version of name already stored in the dirstate
815 818 - version of name stored on disk
816 819 - version provided via command arguments
817 820 """
818 821
819 822 if self._checkcase:
820 823 return self._normalize(path, isknown, ignoremissing)
821 824 return path
822 825
    def clear(self):
        """Forget every entry; the next write persists an empty dirstate."""
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True
828 831
    def rebuild(self, parent, allfiles, changedfiles=None):
        """Reset the dirstate to describe `parent` for `allfiles`.

        When `changedfiles` is given, only those entries are refreshed;
        otherwise the whole dirstate is rebuilt from scratch.
        """
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            # preserve _lastnormaltime across the clear() call
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self._normallookup(f)
        for f in to_drop:
            if self._map.dropfile(f):
                self._updatedfiles.add(f)

        self._dirty = True
863 866
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        # the identity is recorded by the dirstate map when the file is
        # read — see dirstatemap for how it is computed (TODO confirm)
        return self._map.identity
871 874
    def write(self, tr):
        """Persist in-memory dirstate changes (no-op when not dirty).

        With a transaction `tr`, the write is delayed via a file generator;
        without one, the dirstate file is written out immediately.
        """
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamp.
            # delayed writing re-raise "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
903 906
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # callbacks are fired, sorted by category, from _writedirstate
        self._plchangecallbacks[category] = callback
914 917
    def _writedirstate(self, tr, st):
        """Serialize the dirstate map into the already-open file `st`."""
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # timestamp of each entries in dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = 0
        self._dirty = False
948 951
949 952 def _dirignore(self, f):
950 953 if self._ignore(f):
951 954 return True
952 955 for p in pathutil.finddirs(f):
953 956 if self._ignore(p):
954 957 return True
955 958 return False
956 959
957 960 def _ignorefiles(self):
958 961 files = []
959 962 if os.path.exists(self._join(b'.hgignore')):
960 963 files.append(self._join(b'.hgignore'))
961 964 for name, path in self._ui.configitems(b"ui"):
962 965 if name == b'ignore' or name.startswith(b'ignore.'):
963 966 # we need to use os.path.join here rather than self._join
964 967 # because path is arbitrary and user-specified
965 968 files.append(os.path.join(self._rootdir, util.expandpath(path)))
966 969 return files
967 970
968 971 def _ignorefileandline(self, f):
969 972 files = collections.deque(self._ignorefiles())
970 973 visited = set()
971 974 while files:
972 975 i = files.popleft()
973 976 patterns = matchmod.readpatternfile(
974 977 i, self._ui.warn, sourceinfo=True
975 978 )
976 979 for pattern, lineno, line in patterns:
977 980 kind, p = matchmod._patsplit(pattern, b'glob')
978 981 if kind == b"subinclude":
979 982 if p not in visited:
980 983 files.append(p)
981 984 continue
982 985 m = matchmod.match(
983 986 self._root, b'', [], [pattern], warn=self._ui.warn
984 987 )
985 988 if m(f):
986 989 return (i, lineno, line)
987 990 visited.add(i)
988 991 return (None, -1, b"")
989 992
990 993 def _walkexplicit(self, match, subrepos):
991 994 """Get stat data about the files explicitly specified by match.
992 995
993 996 Return a triple (results, dirsfound, dirsnotfound).
994 997 - results is a mapping from filename to stat result. It also contains
995 998 listings mapping subrepos and .hg to None.
996 999 - dirsfound is a list of files found to be directories.
997 1000 - dirsnotfound is a list of files that the dirstate thinks are
998 1001 directories and that were not found."""
999 1002
1000 1003 def badtype(mode):
1001 1004 kind = _(b'unknown')
1002 1005 if stat.S_ISCHR(mode):
1003 1006 kind = _(b'character device')
1004 1007 elif stat.S_ISBLK(mode):
1005 1008 kind = _(b'block device')
1006 1009 elif stat.S_ISFIFO(mode):
1007 1010 kind = _(b'fifo')
1008 1011 elif stat.S_ISSOCK(mode):
1009 1012 kind = _(b'socket')
1010 1013 elif stat.S_ISDIR(mode):
1011 1014 kind = _(b'directory')
1012 1015 return _(b'unsupported file type (type is %s)') % kind
1013 1016
1014 1017 badfn = match.bad
1015 1018 dmap = self._map
1016 1019 lstat = os.lstat
1017 1020 getkind = stat.S_IFMT
1018 1021 dirkind = stat.S_IFDIR
1019 1022 regkind = stat.S_IFREG
1020 1023 lnkkind = stat.S_IFLNK
1021 1024 join = self._join
1022 1025 dirsfound = []
1023 1026 foundadd = dirsfound.append
1024 1027 dirsnotfound = []
1025 1028 notfoundadd = dirsnotfound.append
1026 1029
1027 1030 if not match.isexact() and self._checkcase:
1028 1031 normalize = self._normalize
1029 1032 else:
1030 1033 normalize = None
1031 1034
1032 1035 files = sorted(match.files())
1033 1036 subrepos.sort()
1034 1037 i, j = 0, 0
1035 1038 while i < len(files) and j < len(subrepos):
1036 1039 subpath = subrepos[j] + b"/"
1037 1040 if files[i] < subpath:
1038 1041 i += 1
1039 1042 continue
1040 1043 while i < len(files) and files[i].startswith(subpath):
1041 1044 del files[i]
1042 1045 j += 1
1043 1046
1044 1047 if not files or b'' in files:
1045 1048 files = [b'']
1046 1049 # constructing the foldmap is expensive, so don't do it for the
1047 1050 # common case where files is ['']
1048 1051 normalize = None
1049 1052 results = dict.fromkeys(subrepos)
1050 1053 results[b'.hg'] = None
1051 1054
1052 1055 for ff in files:
1053 1056 if normalize:
1054 1057 nf = normalize(ff, False, True)
1055 1058 else:
1056 1059 nf = ff
1057 1060 if nf in results:
1058 1061 continue
1059 1062
1060 1063 try:
1061 1064 st = lstat(join(nf))
1062 1065 kind = getkind(st.st_mode)
1063 1066 if kind == dirkind:
1064 1067 if nf in dmap:
1065 1068 # file replaced by dir on disk but still in dirstate
1066 1069 results[nf] = None
1067 1070 foundadd((nf, ff))
1068 1071 elif kind == regkind or kind == lnkkind:
1069 1072 results[nf] = st
1070 1073 else:
1071 1074 badfn(ff, badtype(kind))
1072 1075 if nf in dmap:
1073 1076 results[nf] = None
1074 1077 except OSError as inst: # nf not found on disk - it is dirstate only
1075 1078 if nf in dmap: # does it exactly match a missing file?
1076 1079 results[nf] = None
1077 1080 else: # does it match a missing directory?
1078 1081 if self._map.hasdir(nf):
1079 1082 notfoundadd(nf)
1080 1083 else:
1081 1084 badfn(ff, encoding.strtolocal(inst.strerror))
1082 1085
1083 1086 # match.files() may contain explicitly-specified paths that shouldn't
1084 1087 # be taken; drop them from the list of files found. dirsfound/notfound
1085 1088 # aren't filtered here because they will be tested later.
1086 1089 if match.anypats():
1087 1090 for f in list(results):
1088 1091 if f == b'.hg' or f in subrepos:
1089 1092 # keep sentinel to disable further out-of-repo walks
1090 1093 continue
1091 1094 if not match(f):
1092 1095 del results[f]
1093 1096
1094 1097 # Case insensitive filesystems cannot rely on lstat() failing to detect
1095 1098 # a case-only rename. Prune the stat object for any file that does not
1096 1099 # match the case in the filesystem, if there are multiple files that
1097 1100 # normalize to the same path.
1098 1101 if match.isexact() and self._checkcase:
1099 1102 normed = {}
1100 1103
1101 1104 for f, st in pycompat.iteritems(results):
1102 1105 if st is None:
1103 1106 continue
1104 1107
1105 1108 nc = util.normcase(f)
1106 1109 paths = normed.get(nc)
1107 1110
1108 1111 if paths is None:
1109 1112 paths = set()
1110 1113 normed[nc] = paths
1111 1114
1112 1115 paths.add(f)
1113 1116
1114 1117 for norm, paths in pycompat.iteritems(normed):
1115 1118 if len(paths) > 1:
1116 1119 for path in paths:
1117 1120 folded = self._discoverpath(
1118 1121 path, norm, True, None, self._map.dirfoldmap
1119 1122 )
1120 1123 if path != folded:
1121 1124 results[path] = None
1122 1125
1123 1126 return results, dirsfound, dirsnotfound
1124 1127
1125 1128 def walk(self, match, subrepos, unknown, ignored, full=True):
1126 1129 """
1127 1130 Walk recursively through the directory tree, finding all files
1128 1131 matched by match.
1129 1132
1130 1133 If full is False, maybe skip some known-clean files.
1131 1134
1132 1135 Return a dict mapping filename to stat-like object (either
1133 1136 mercurial.osutil.stat instance or return value of os.stat()).
1134 1137
1135 1138 """
1136 1139 # full is a flag that extensions that hook into walk can use -- this
1137 1140 # implementation doesn't use it at all. This satisfies the contract
1138 1141 # because we only guarantee a "maybe".
1139 1142
1140 1143 if ignored:
1141 1144 ignore = util.never
1142 1145 dirignore = util.never
1143 1146 elif unknown:
1144 1147 ignore = self._ignore
1145 1148 dirignore = self._dirignore
1146 1149 else:
1147 1150 # if not unknown and not ignored, drop dir recursion and step 2
1148 1151 ignore = util.always
1149 1152 dirignore = util.always
1150 1153
1151 1154 matchfn = match.matchfn
1152 1155 matchalways = match.always()
1153 1156 matchtdir = match.traversedir
1154 1157 dmap = self._map
1155 1158 listdir = util.listdir
1156 1159 lstat = os.lstat
1157 1160 dirkind = stat.S_IFDIR
1158 1161 regkind = stat.S_IFREG
1159 1162 lnkkind = stat.S_IFLNK
1160 1163 join = self._join
1161 1164
1162 1165 exact = skipstep3 = False
1163 1166 if match.isexact(): # match.exact
1164 1167 exact = True
1165 1168 dirignore = util.always # skip step 2
1166 1169 elif match.prefix(): # match.match, no patterns
1167 1170 skipstep3 = True
1168 1171
1169 1172 if not exact and self._checkcase:
1170 1173 normalize = self._normalize
1171 1174 normalizefile = self._normalizefile
1172 1175 skipstep3 = False
1173 1176 else:
1174 1177 normalize = self._normalize
1175 1178 normalizefile = None
1176 1179
1177 1180 # step 1: find all explicit files
1178 1181 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1179 1182 if matchtdir:
1180 1183 for d in work:
1181 1184 matchtdir(d[0])
1182 1185 for d in dirsnotfound:
1183 1186 matchtdir(d)
1184 1187
1185 1188 skipstep3 = skipstep3 and not (work or dirsnotfound)
1186 1189 work = [d for d in work if not dirignore(d[0])]
1187 1190
1188 1191 # step 2: visit subdirectories
1189 1192 def traverse(work, alreadynormed):
1190 1193 wadd = work.append
1191 1194 while work:
1192 1195 tracing.counter('dirstate.walk work', len(work))
1193 1196 nd = work.pop()
1194 1197 visitentries = match.visitchildrenset(nd)
1195 1198 if not visitentries:
1196 1199 continue
1197 1200 if visitentries == b'this' or visitentries == b'all':
1198 1201 visitentries = None
1199 1202 skip = None
1200 1203 if nd != b'':
1201 1204 skip = b'.hg'
1202 1205 try:
1203 1206 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1204 1207 entries = listdir(join(nd), stat=True, skip=skip)
1205 1208 except OSError as inst:
1206 1209 if inst.errno in (errno.EACCES, errno.ENOENT):
1207 1210 match.bad(
1208 1211 self.pathto(nd), encoding.strtolocal(inst.strerror)
1209 1212 )
1210 1213 continue
1211 1214 raise
1212 1215 for f, kind, st in entries:
1213 1216 # Some matchers may return files in the visitentries set,
1214 1217 # instead of 'this', if the matcher explicitly mentions them
1215 1218 # and is not an exactmatcher. This is acceptable; we do not
1216 1219 # make any hard assumptions about file-or-directory below
1217 1220 # based on the presence of `f` in visitentries. If
1218 1221 # visitchildrenset returned a set, we can always skip the
1219 1222 # entries *not* in the set it provided regardless of whether
1220 1223 # they're actually a file or a directory.
1221 1224 if visitentries and f not in visitentries:
1222 1225 continue
1223 1226 if normalizefile:
1224 1227 # even though f might be a directory, we're only
1225 1228 # interested in comparing it to files currently in the
1226 1229 # dmap -- therefore normalizefile is enough
1227 1230 nf = normalizefile(
1228 1231 nd and (nd + b"/" + f) or f, True, True
1229 1232 )
1230 1233 else:
1231 1234 nf = nd and (nd + b"/" + f) or f
1232 1235 if nf not in results:
1233 1236 if kind == dirkind:
1234 1237 if not ignore(nf):
1235 1238 if matchtdir:
1236 1239 matchtdir(nf)
1237 1240 wadd(nf)
1238 1241 if nf in dmap and (matchalways or matchfn(nf)):
1239 1242 results[nf] = None
1240 1243 elif kind == regkind or kind == lnkkind:
1241 1244 if nf in dmap:
1242 1245 if matchalways or matchfn(nf):
1243 1246 results[nf] = st
1244 1247 elif (matchalways or matchfn(nf)) and not ignore(
1245 1248 nf
1246 1249 ):
1247 1250 # unknown file -- normalize if necessary
1248 1251 if not alreadynormed:
1249 1252 nf = normalize(nf, False, True)
1250 1253 results[nf] = st
1251 1254 elif nf in dmap and (matchalways or matchfn(nf)):
1252 1255 results[nf] = None
1253 1256
1254 1257 for nd, d in work:
1255 1258 # alreadynormed means that processwork doesn't have to do any
1256 1259 # expensive directory normalization
1257 1260 alreadynormed = not normalize or nd == d
1258 1261 traverse([d], alreadynormed)
1259 1262
1260 1263 for s in subrepos:
1261 1264 del results[s]
1262 1265 del results[b'.hg']
1263 1266
1264 1267 # step 3: visit remaining files from dmap
1265 1268 if not skipstep3 and not exact:
1266 1269 # If a dmap file is not in results yet, it was either
1267 1270 # a) not matching matchfn b) ignored, c) missing, or d) under a
1268 1271 # symlink directory.
1269 1272 if not results and matchalways:
1270 1273 visit = [f for f in dmap]
1271 1274 else:
1272 1275 visit = [f for f in dmap if f not in results and matchfn(f)]
1273 1276 visit.sort()
1274 1277
1275 1278 if unknown:
1276 1279 # unknown == True means we walked all dirs under the roots
1277 1280 # that wasn't ignored, and everything that matched was stat'ed
1278 1281 # and is already in results.
1279 1282 # The rest must thus be ignored or under a symlink.
1280 1283 audit_path = pathutil.pathauditor(self._root, cached=True)
1281 1284
1282 1285 for nf in iter(visit):
1283 1286 # If a stat for the same file was already added with a
1284 1287 # different case, don't add one for this, since that would
1285 1288 # make it appear as if the file exists under both names
1286 1289 # on disk.
1287 1290 if (
1288 1291 normalizefile
1289 1292 and normalizefile(nf, True, True) in results
1290 1293 ):
1291 1294 results[nf] = None
1292 1295 # Report ignored items in the dmap as long as they are not
1293 1296 # under a symlink directory.
1294 1297 elif audit_path.check(nf):
1295 1298 try:
1296 1299 results[nf] = lstat(join(nf))
1297 1300 # file was just ignored, no links, and exists
1298 1301 except OSError:
1299 1302 # file doesn't exist
1300 1303 results[nf] = None
1301 1304 else:
1302 1305 # It's either missing or under a symlink directory
1303 1306 # which we in this case report as missing
1304 1307 results[nf] = None
1305 1308 else:
1306 1309 # We may not have walked the full directory tree above,
1307 1310 # so stat and check everything we missed.
1308 1311 iv = iter(visit)
1309 1312 for st in util.statfiles([join(i) for i in visit]):
1310 1313 results[next(iv)] = st
1311 1314 return results
1312 1315
1313 1316 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1314 1317 # Force Rayon (Rust parallelism library) to respect the number of
1315 1318 # workers. This is a temporary workaround until Rust code knows
1316 1319 # how to read the config file.
1317 1320 numcpus = self._ui.configint(b"worker", b"numcpus")
1318 1321 if numcpus is not None:
1319 1322 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1320 1323
1321 1324 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1322 1325 if not workers_enabled:
1323 1326 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1324 1327
1325 1328 (
1326 1329 lookup,
1327 1330 modified,
1328 1331 added,
1329 1332 removed,
1330 1333 deleted,
1331 1334 clean,
1332 1335 ignored,
1333 1336 unknown,
1334 1337 warnings,
1335 1338 bad,
1336 1339 traversed,
1337 1340 dirty,
1338 1341 ) = rustmod.status(
1339 1342 self._map._rustmap,
1340 1343 matcher,
1341 1344 self._rootdir,
1342 1345 self._ignorefiles(),
1343 1346 self._checkexec,
1344 1347 self._lastnormaltime,
1345 1348 bool(list_clean),
1346 1349 bool(list_ignored),
1347 1350 bool(list_unknown),
1348 1351 bool(matcher.traversedir),
1349 1352 )
1350 1353
1351 1354 self._dirty |= dirty
1352 1355
1353 1356 if matcher.traversedir:
1354 1357 for dir in traversed:
1355 1358 matcher.traversedir(dir)
1356 1359
1357 1360 if self._ui.warn:
1358 1361 for item in warnings:
1359 1362 if isinstance(item, tuple):
1360 1363 file_path, syntax = item
1361 1364 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1362 1365 file_path,
1363 1366 syntax,
1364 1367 )
1365 1368 self._ui.warn(msg)
1366 1369 else:
1367 1370 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1368 1371 self._ui.warn(
1369 1372 msg
1370 1373 % (
1371 1374 pathutil.canonpath(
1372 1375 self._rootdir, self._rootdir, item
1373 1376 ),
1374 1377 b"No such file or directory",
1375 1378 )
1376 1379 )
1377 1380
1378 1381 for (fn, message) in bad:
1379 1382 matcher.bad(fn, encoding.strtolocal(message))
1380 1383
1381 1384 status = scmutil.status(
1382 1385 modified=modified,
1383 1386 added=added,
1384 1387 removed=removed,
1385 1388 deleted=deleted,
1386 1389 unknown=unknown,
1387 1390 ignored=ignored,
1388 1391 clean=clean,
1389 1392 )
1390 1393 return (lookup, status)
1391 1394
1392 1395 def status(self, match, subrepos, ignored, clean, unknown):
1393 1396 """Determine the status of the working copy relative to the
1394 1397 dirstate and return a pair of (unsure, status), where status is of type
1395 1398 scmutil.status and:
1396 1399
1397 1400 unsure:
1398 1401 files that might have been modified since the dirstate was
1399 1402 written, but need to be read to be sure (size is the same
1400 1403 but mtime differs)
1401 1404 status.modified:
1402 1405 files that have definitely been modified since the dirstate
1403 1406 was written (different size or mode)
1404 1407 status.clean:
1405 1408 files that have definitely not been modified since the
1406 1409 dirstate was written
1407 1410 """
1408 1411 listignored, listclean, listunknown = ignored, clean, unknown
1409 1412 lookup, modified, added, unknown, ignored = [], [], [], [], []
1410 1413 removed, deleted, clean = [], [], []
1411 1414
1412 1415 dmap = self._map
1413 1416 dmap.preload()
1414 1417
1415 1418 use_rust = True
1416 1419
1417 1420 allowed_matchers = (
1418 1421 matchmod.alwaysmatcher,
1419 1422 matchmod.exactmatcher,
1420 1423 matchmod.includematcher,
1421 1424 )
1422 1425
1423 1426 if rustmod is None:
1424 1427 use_rust = False
1425 1428 elif self._checkcase:
1426 1429 # Case-insensitive filesystems are not handled yet
1427 1430 use_rust = False
1428 1431 elif subrepos:
1429 1432 use_rust = False
1430 1433 elif sparse.enabled:
1431 1434 use_rust = False
1432 1435 elif not isinstance(match, allowed_matchers):
1433 1436 # Some matchers have yet to be implemented
1434 1437 use_rust = False
1435 1438
1436 1439 if use_rust:
1437 1440 try:
1438 1441 return self._rust_status(
1439 1442 match, listclean, listignored, listunknown
1440 1443 )
1441 1444 except rustmod.FallbackError:
1442 1445 pass
1443 1446
1444 1447 def noop(f):
1445 1448 pass
1446 1449
1447 1450 dcontains = dmap.__contains__
1448 1451 dget = dmap.__getitem__
1449 1452 ladd = lookup.append # aka "unsure"
1450 1453 madd = modified.append
1451 1454 aadd = added.append
1452 1455 uadd = unknown.append if listunknown else noop
1453 1456 iadd = ignored.append if listignored else noop
1454 1457 radd = removed.append
1455 1458 dadd = deleted.append
1456 1459 cadd = clean.append if listclean else noop
1457 1460 mexact = match.exact
1458 1461 dirignore = self._dirignore
1459 1462 checkexec = self._checkexec
1460 1463 copymap = self._map.copymap
1461 1464 lastnormaltime = self._lastnormaltime
1462 1465
1463 1466 # We need to do full walks when either
1464 1467 # - we're listing all clean files, or
1465 1468 # - match.traversedir does something, because match.traversedir should
1466 1469 # be called for every dir in the working dir
1467 1470 full = listclean or match.traversedir is not None
1468 1471 for fn, st in pycompat.iteritems(
1469 1472 self.walk(match, subrepos, listunknown, listignored, full=full)
1470 1473 ):
1471 1474 if not dcontains(fn):
1472 1475 if (listignored or mexact(fn)) and dirignore(fn):
1473 1476 if listignored:
1474 1477 iadd(fn)
1475 1478 else:
1476 1479 uadd(fn)
1477 1480 continue
1478 1481
1479 1482 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1480 1483 # written like that for performance reasons. dmap[fn] is not a
1481 1484 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1482 1485 # opcode has fast paths when the value to be unpacked is a tuple or
1483 1486 # a list, but falls back to creating a full-fledged iterator in
1484 1487 # general. That is much slower than simply accessing and storing the
1485 1488 # tuple members one by one.
1486 1489 t = dget(fn)
1487 1490 mode = t.mode
1488 1491 size = t.size
1489 1492 time = t.mtime
1490 1493
1491 1494 if not st and t.tracked:
1492 1495 dadd(fn)
1493 1496 elif t.merged:
1494 1497 madd(fn)
1495 1498 elif t.added:
1496 1499 aadd(fn)
1497 1500 elif t.removed:
1498 1501 radd(fn)
1499 1502 elif t.tracked:
1500 1503 if (
1501 1504 size >= 0
1502 1505 and (
1503 1506 (size != st.st_size and size != st.st_size & _rangemask)
1504 1507 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1505 1508 )
1506 1509 or t.from_p2
1507 1510 or fn in copymap
1508 1511 ):
1509 1512 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1510 1513 # issue6456: Size returned may be longer due to
1511 1514 # encryption on EXT-4 fscrypt, undecided.
1512 1515 ladd(fn)
1513 1516 else:
1514 1517 madd(fn)
1515 1518 elif (
1516 1519 time != st[stat.ST_MTIME]
1517 1520 and time != st[stat.ST_MTIME] & _rangemask
1518 1521 ):
1519 1522 ladd(fn)
1520 1523 elif st[stat.ST_MTIME] == lastnormaltime:
1521 1524 # fn may have just been marked as normal and it may have
1522 1525 # changed in the same second without changing its size.
1523 1526 # This can happen if we quickly do multiple commits.
1524 1527 # Force lookup, so we don't miss such a racy file change.
1525 1528 ladd(fn)
1526 1529 elif listclean:
1527 1530 cadd(fn)
1528 1531 status = scmutil.status(
1529 1532 modified, added, removed, deleted, unknown, ignored, clean
1530 1533 )
1531 1534 return (lookup, status)
1532 1535
1533 1536 def matches(self, match):
1534 1537 """
1535 1538 return files in the dirstate (in whatever state) filtered by match
1536 1539 """
1537 1540 dmap = self._map
1538 1541 if rustmod is not None:
1539 1542 dmap = self._map._rustmap
1540 1543
1541 1544 if match.always():
1542 1545 return dmap.keys()
1543 1546 files = match.files()
1544 1547 if match.isexact():
1545 1548 # fast path -- filter the other way around, since typically files is
1546 1549 # much smaller than dmap
1547 1550 return [f for f in files if f in dmap]
1548 1551 if match.prefix() and all(fn in dmap for fn in files):
1549 1552 # fast path -- all the values are known to be files, so just return
1550 1553 # that
1551 1554 return list(files)
1552 1555 return [f for f in dmap if match(f)]
1553 1556
1554 1557 def _actualfilename(self, tr):
1555 1558 if tr:
1556 1559 return self._pendingfilename
1557 1560 else:
1558 1561 return self._filename
1559 1562
1560 1563 def savebackup(self, tr, backupname):
1561 1564 '''Save current dirstate into backup file'''
1562 1565 filename = self._actualfilename(tr)
1563 1566 assert backupname != filename
1564 1567
1565 1568 # use '_writedirstate' instead of 'write' to write changes certainly,
1566 1569 # because the latter omits writing out if transaction is running.
1567 1570 # output file will be used to create backup of dirstate at this point.
1568 1571 if self._dirty or not self._opener.exists(filename):
1569 1572 self._writedirstate(
1570 1573 tr,
1571 1574 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1572 1575 )
1573 1576
1574 1577 if tr:
1575 1578 # ensure that subsequent tr.writepending returns True for
1576 1579 # changes written out above, even if dirstate is never
1577 1580 # changed after this
1578 1581 tr.addfilegenerator(
1579 1582 b'dirstate',
1580 1583 (self._filename,),
1581 1584 lambda f: self._writedirstate(tr, f),
1582 1585 location=b'plain',
1583 1586 )
1584 1587
1585 1588 # ensure that pending file written above is unlinked at
1586 1589 # failure, even if tr.writepending isn't invoked until the
1587 1590 # end of this transaction
1588 1591 tr.registertmp(filename, location=b'plain')
1589 1592
1590 1593 self._opener.tryunlink(backupname)
1591 1594 # hardlink backup is okay because _writedirstate is always called
1592 1595 # with an "atomictemp=True" file.
1593 1596 util.copyfile(
1594 1597 self._opener.join(filename),
1595 1598 self._opener.join(backupname),
1596 1599 hardlink=True,
1597 1600 )
1598 1601
1599 1602 def restorebackup(self, tr, backupname):
1600 1603 '''Restore dirstate by backup file'''
1601 1604 # this "invalidate()" prevents "wlock.release()" from writing
1602 1605 # changes of dirstate out after restoring from backup file
1603 1606 self.invalidate()
1604 1607 filename = self._actualfilename(tr)
1605 1608 o = self._opener
1606 1609 if util.samefile(o.join(backupname), o.join(filename)):
1607 1610 o.unlink(backupname)
1608 1611 else:
1609 1612 o.rename(backupname, filename, checkambig=True)
1610 1613
1611 1614 def clearbackup(self, tr, backupname):
1612 1615 '''Clear backup file'''
1613 1616 self._opener.unlink(backupname)
@@ -1,116 +1,116
1 1 $ hg init
2 2
3 3 $ echo foo > foo
4 4 $ echo bar > bar
5 5 $ hg ci -qAm 'add foo bar'
6 6
7 7 $ echo foo2 >> foo
8 8 $ echo bleh > bar
9 9 $ hg ci -m 'change foo bar'
10 10
11 11 $ hg up -qC 0
12 12 $ hg mv foo foo1
13 13 $ echo foo1 > foo1
14 14 $ hg cat foo >> foo1
15 15 $ hg ci -m 'mv foo foo1'
16 16 created new head
17 17
18 18 $ hg merge
19 19 merging foo1 and foo to foo1
20 20 1 files updated, 1 files merged, 0 files removed, 0 files unresolved
21 21 (branch merge, don't forget to commit)
22 22
23 23 $ hg debugstate --no-dates
24 24 m 0 -2 unset bar
25 25 m 0 -2 unset foo1
26 26 copy: foo -> foo1
27 27
28 28 $ hg st -q
29 29 M bar
30 30 M foo1
31 31
32 32
33 33 Removing foo1 and bar:
34 34
35 35 $ cp foo1 F
36 36 $ cp bar B
37 37 $ hg rm -f foo1 bar
38 38
39 39 $ hg debugstate --no-dates
40 40 r 0 -1 set bar
41 41 r 0 -1 set foo1
42 42 copy: foo -> foo1
43 43
44 44 $ hg st -qC
45 45 R bar
46 46 R foo1
47 47
48 48
49 49 Re-adding foo1 and bar:
50 50
51 51 $ cp F foo1
52 52 $ cp B bar
53 53 $ hg add -v foo1 bar
54 54 adding bar
55 55 adding foo1
56 56
57 57 $ hg debugstate --no-dates
58 n 0 -2 unset bar
59 n 0 -2 unset foo1
58 m 0 -2 unset bar
59 m 0 -2 unset foo1
60 60 copy: foo -> foo1
61 61
62 62 $ hg st -qC
63 63 M bar
64 64 M foo1
65 65 foo
66 66
67 67
68 68 Reverting foo1 and bar:
69 69
70 70 $ hg revert -vr . foo1 bar
71 71 saving current version of bar as bar.orig
72 72 saving current version of foo1 as foo1.orig
73 73 reverting bar
74 74 reverting foo1
75 75
76 76 $ hg debugstate --no-dates
77 n 0 -2 unset bar
78 n 0 -2 unset foo1
77 m 0 -2 unset bar
78 m 0 -2 unset foo1
79 79 copy: foo -> foo1
80 80
81 81 $ hg st -qC
82 82 M bar
83 83 M foo1
84 84 foo
85 85
86 86 $ hg diff
87 87
88 88 Merge should not overwrite local file that is untracked after remove
89 89
90 90 $ rm *
91 91 $ hg up -qC
92 92 $ hg rm bar
93 93 $ hg ci -m 'remove bar'
94 94 $ echo 'memories of buried pirate treasure' > bar
95 95 $ hg merge
96 96 bar: untracked file differs
97 97 abort: untracked files in working directory differ from files in requested revision
98 98 [20]
99 99 $ cat bar
100 100 memories of buried pirate treasure
101 101
102 102 Those who use force will lose
103 103
104 104 $ hg merge -f
105 105 file 'bar' was deleted in local [working copy] but was modified in other [merge rev].
106 106 You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
107 107 What do you want to do? u
108 108 merging foo1 and foo to foo1
109 109 0 files updated, 1 files merged, 0 files removed, 1 files unresolved
110 110 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
111 111 [1]
112 112 $ cat bar
113 113 bleh
114 114 $ hg st
115 115 M bar
116 116 M foo1
General Comments 0
You need to be logged in to leave comments. Login now