# Provenance: excerpt of Mercurial's mercurial/context.py as of changeset
# aa8a649a (r49218, draft, default branch) — "status: move the boundary
# comparison logic within the timestamp module" by marmoute.
# Original diff header: @@ -1,3158 +1,3141 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullrev,
19 19 short,
20 20 )
21 21 from .pycompat import (
22 22 getattr,
23 23 open,
24 24 )
25 25 from . import (
26 26 dagop,
27 27 encoding,
28 28 error,
29 29 fileset,
30 30 match as matchmod,
31 31 mergestate as mergestatemod,
32 32 metadata,
33 33 obsolete as obsmod,
34 34 patch,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 scmutil,
40 40 sparse,
41 41 subrepo,
42 42 subrepoutil,
43 43 util,
44 44 )
45 45 from .utils import (
46 46 dateutil,
47 47 stringutil,
48 48 )
49 49 from .dirstateutils import (
50 50 timestamp,
51 51 )
52 52
53 53 propertycache = util.propertycache
54 54
55 55
56 56 class basectx(object):
57 57 """A basectx object represents the common logic for its children:
58 58 changectx: read-only context that is already present in the repo,
59 59 workingctx: a context that represents the working directory and can
60 60 be committed,
61 61 memctx: a context that represents changes in-memory and can also
62 62 be committed."""
63 63
64 64 def __init__(self, repo):
65 65 self._repo = repo
66 66
67 67 def __bytes__(self):
68 68 return short(self.node())
69 69
70 70 __str__ = encoding.strmethod(__bytes__)
71 71
72 72 def __repr__(self):
73 73 return "<%s %s>" % (type(self).__name__, str(self))
74 74
75 75 def __eq__(self, other):
76 76 try:
77 77 return type(self) == type(other) and self._rev == other._rev
78 78 except AttributeError:
79 79 return False
80 80
81 81 def __ne__(self, other):
82 82 return not (self == other)
83 83
84 84 def __contains__(self, key):
85 85 return key in self._manifest
86 86
87 87 def __getitem__(self, key):
88 88 return self.filectx(key)
89 89
90 90 def __iter__(self):
91 91 return iter(self._manifest)
92 92
93 93 def _buildstatusmanifest(self, status):
94 94 """Builds a manifest that includes the given status results, if this is
95 95 a working copy context. For non-working copy contexts, it just returns
96 96 the normal manifest."""
97 97 return self.manifest()
98 98
99 99 def _matchstatus(self, other, match):
100 100 """This internal method provides a way for child objects to override the
101 101 match operator.
102 102 """
103 103 return match
104 104
105 105 def _buildstatus(
106 106 self, other, s, match, listignored, listclean, listunknown
107 107 ):
108 108 """build a status with respect to another context"""
109 109 # Load earliest manifest first for caching reasons. More specifically,
110 110 # if you have revisions 1000 and 1001, 1001 is probably stored as a
111 111 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
112 112 # 1000 and cache it so that when you read 1001, we just need to apply a
113 113 # delta to what's in the cache. So that's one full reconstruction + one
114 114 # delta application.
115 115 mf2 = None
116 116 if self.rev() is not None and self.rev() < other.rev():
117 117 mf2 = self._buildstatusmanifest(s)
118 118 mf1 = other._buildstatusmanifest(s)
119 119 if mf2 is None:
120 120 mf2 = self._buildstatusmanifest(s)
121 121
122 122 modified, added = [], []
123 123 removed = []
124 124 clean = []
125 125 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
126 126 deletedset = set(deleted)
127 127 d = mf1.diff(mf2, match=match, clean=listclean)
128 128 for fn, value in pycompat.iteritems(d):
129 129 if fn in deletedset:
130 130 continue
131 131 if value is None:
132 132 clean.append(fn)
133 133 continue
134 134 (node1, flag1), (node2, flag2) = value
135 135 if node1 is None:
136 136 added.append(fn)
137 137 elif node2 is None:
138 138 removed.append(fn)
139 139 elif flag1 != flag2:
140 140 modified.append(fn)
141 141 elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
142 142 # When comparing files between two commits, we save time by
143 143 # not comparing the file contents when the nodeids differ.
144 144 # Note that this means we incorrectly report a reverted change
145 145 # to a file as a modification.
146 146 modified.append(fn)
147 147 elif self[fn].cmp(other[fn]):
148 148 modified.append(fn)
149 149 else:
150 150 clean.append(fn)
151 151
152 152 if removed:
153 153 # need to filter files if they are already reported as removed
154 154 unknown = [
155 155 fn
156 156 for fn in unknown
157 157 if fn not in mf1 and (not match or match(fn))
158 158 ]
159 159 ignored = [
160 160 fn
161 161 for fn in ignored
162 162 if fn not in mf1 and (not match or match(fn))
163 163 ]
164 164 # if they're deleted, don't report them as removed
165 165 removed = [fn for fn in removed if fn not in deletedset]
166 166
167 167 return scmutil.status(
168 168 modified, added, removed, deleted, unknown, ignored, clean
169 169 )
170 170
171 171 @propertycache
172 172 def substate(self):
173 173 return subrepoutil.state(self, self._repo.ui)
174 174
175 175 def subrev(self, subpath):
176 176 return self.substate[subpath][1]
177 177
178 178 def rev(self):
179 179 return self._rev
180 180
181 181 def node(self):
182 182 return self._node
183 183
184 184 def hex(self):
185 185 return hex(self.node())
186 186
187 187 def manifest(self):
188 188 return self._manifest
189 189
190 190 def manifestctx(self):
191 191 return self._manifestctx
192 192
193 193 def repo(self):
194 194 return self._repo
195 195
196 196 def phasestr(self):
197 197 return phases.phasenames[self.phase()]
198 198
199 199 def mutable(self):
200 200 return self.phase() > phases.public
201 201
202 202 def matchfileset(self, cwd, expr, badfn=None):
203 203 return fileset.match(self, cwd, expr, badfn=badfn)
204 204
205 205 def obsolete(self):
206 206 """True if the changeset is obsolete"""
207 207 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
208 208
209 209 def extinct(self):
210 210 """True if the changeset is extinct"""
211 211 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
212 212
213 213 def orphan(self):
214 214 """True if the changeset is not obsolete, but its ancestor is"""
215 215 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
216 216
217 217 def phasedivergent(self):
218 218 """True if the changeset tries to be a successor of a public changeset
219 219
220 220 Only non-public and non-obsolete changesets may be phase-divergent.
221 221 """
222 222 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
223 223
224 224 def contentdivergent(self):
225 225 """Is a successor of a changeset with multiple possible successor sets
226 226
227 227 Only non-public and non-obsolete changesets may be content-divergent.
228 228 """
229 229 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
230 230
231 231 def isunstable(self):
232 232 """True if the changeset is either orphan, phase-divergent or
233 233 content-divergent"""
234 234 return self.orphan() or self.phasedivergent() or self.contentdivergent()
235 235
236 236 def instabilities(self):
237 237 """return the list of instabilities affecting this changeset.
238 238
239 239 Instabilities are returned as strings. possible values are:
240 240 - orphan,
241 241 - phase-divergent,
242 242 - content-divergent.
243 243 """
244 244 instabilities = []
245 245 if self.orphan():
246 246 instabilities.append(b'orphan')
247 247 if self.phasedivergent():
248 248 instabilities.append(b'phase-divergent')
249 249 if self.contentdivergent():
250 250 instabilities.append(b'content-divergent')
251 251 return instabilities
252 252
253 253 def parents(self):
254 254 """return contexts for each parent changeset"""
255 255 return self._parents
256 256
257 257 def p1(self):
258 258 return self._parents[0]
259 259
260 260 def p2(self):
261 261 parents = self._parents
262 262 if len(parents) == 2:
263 263 return parents[1]
264 264 return self._repo[nullrev]
265 265
266 266 def _fileinfo(self, path):
267 267 if '_manifest' in self.__dict__:
268 268 try:
269 269 return self._manifest.find(path)
270 270 except KeyError:
271 271 raise error.ManifestLookupError(
272 272 self._node or b'None', path, _(b'not found in manifest')
273 273 )
274 274 if '_manifestdelta' in self.__dict__ or path in self.files():
275 275 if path in self._manifestdelta:
276 276 return (
277 277 self._manifestdelta[path],
278 278 self._manifestdelta.flags(path),
279 279 )
280 280 mfl = self._repo.manifestlog
281 281 try:
282 282 node, flag = mfl[self._changeset.manifest].find(path)
283 283 except KeyError:
284 284 raise error.ManifestLookupError(
285 285 self._node or b'None', path, _(b'not found in manifest')
286 286 )
287 287
288 288 return node, flag
289 289
290 290 def filenode(self, path):
291 291 return self._fileinfo(path)[0]
292 292
293 293 def flags(self, path):
294 294 try:
295 295 return self._fileinfo(path)[1]
296 296 except error.LookupError:
297 297 return b''
298 298
299 299 @propertycache
300 300 def _copies(self):
301 301 return metadata.computechangesetcopies(self)
302 302
303 303 def p1copies(self):
304 304 return self._copies[0]
305 305
306 306 def p2copies(self):
307 307 return self._copies[1]
308 308
309 309 def sub(self, path, allowcreate=True):
310 310 '''return a subrepo for the stored revision of path, never wdir()'''
311 311 return subrepo.subrepo(self, path, allowcreate=allowcreate)
312 312
313 313 def nullsub(self, path, pctx):
314 314 return subrepo.nullsubrepo(self, path, pctx)
315 315
316 316 def workingsub(self, path):
317 317 """return a subrepo for the stored revision, or wdir if this is a wdir
318 318 context.
319 319 """
320 320 return subrepo.subrepo(self, path, allowwdir=True)
321 321
322 322 def match(
323 323 self,
324 324 pats=None,
325 325 include=None,
326 326 exclude=None,
327 327 default=b'glob',
328 328 listsubrepos=False,
329 329 badfn=None,
330 330 cwd=None,
331 331 ):
332 332 r = self._repo
333 333 if not cwd:
334 334 cwd = r.getcwd()
335 335 return matchmod.match(
336 336 r.root,
337 337 cwd,
338 338 pats,
339 339 include,
340 340 exclude,
341 341 default,
342 342 auditor=r.nofsauditor,
343 343 ctx=self,
344 344 listsubrepos=listsubrepos,
345 345 badfn=badfn,
346 346 )
347 347
348 348 def diff(
349 349 self,
350 350 ctx2=None,
351 351 match=None,
352 352 changes=None,
353 353 opts=None,
354 354 losedatafn=None,
355 355 pathfn=None,
356 356 copy=None,
357 357 copysourcematch=None,
358 358 hunksfilterfn=None,
359 359 ):
360 360 """Returns a diff generator for the given contexts and matcher"""
361 361 if ctx2 is None:
362 362 ctx2 = self.p1()
363 363 if ctx2 is not None:
364 364 ctx2 = self._repo[ctx2]
365 365 return patch.diff(
366 366 self._repo,
367 367 ctx2,
368 368 self,
369 369 match=match,
370 370 changes=changes,
371 371 opts=opts,
372 372 losedatafn=losedatafn,
373 373 pathfn=pathfn,
374 374 copy=copy,
375 375 copysourcematch=copysourcematch,
376 376 hunksfilterfn=hunksfilterfn,
377 377 )
378 378
379 379 def dirs(self):
380 380 return self._manifest.dirs()
381 381
382 382 def hasdir(self, dir):
383 383 return self._manifest.hasdir(dir)
384 384
385 385 def status(
386 386 self,
387 387 other=None,
388 388 match=None,
389 389 listignored=False,
390 390 listclean=False,
391 391 listunknown=False,
392 392 listsubrepos=False,
393 393 ):
394 394 """return status of files between two nodes or node and working
395 395 directory.
396 396
397 397 If other is None, compare this node with working directory.
398 398
399 399 ctx1.status(ctx2) returns the status of change from ctx1 to ctx2
400 400
401 401 Returns a mercurial.scmutils.status object.
402 402
403 403 Data can be accessed using either tuple notation:
404 404
405 405 (modified, added, removed, deleted, unknown, ignored, clean)
406 406
407 407 or direct attribute access:
408 408
409 409 s.modified, s.added, ...
410 410 """
411 411
412 412 ctx1 = self
413 413 ctx2 = self._repo[other]
414 414
415 415 # This next code block is, admittedly, fragile logic that tests for
416 416 # reversing the contexts and wouldn't need to exist if it weren't for
417 417 # the fast (and common) code path of comparing the working directory
418 418 # with its first parent.
419 419 #
420 420 # What we're aiming for here is the ability to call:
421 421 #
422 422 # workingctx.status(parentctx)
423 423 #
424 424 # If we always built the manifest for each context and compared those,
425 425 # then we'd be done. But the special case of the above call means we
426 426 # just copy the manifest of the parent.
427 427 reversed = False
428 428 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
429 429 reversed = True
430 430 ctx1, ctx2 = ctx2, ctx1
431 431
432 432 match = self._repo.narrowmatch(match)
433 433 match = ctx2._matchstatus(ctx1, match)
434 434 r = scmutil.status([], [], [], [], [], [], [])
435 435 r = ctx2._buildstatus(
436 436 ctx1, r, match, listignored, listclean, listunknown
437 437 )
438 438
439 439 if reversed:
440 440 # Reverse added and removed. Clear deleted, unknown and ignored as
441 441 # these make no sense to reverse.
442 442 r = scmutil.status(
443 443 r.modified, r.removed, r.added, [], [], [], r.clean
444 444 )
445 445
446 446 if listsubrepos:
447 447 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
448 448 try:
449 449 rev2 = ctx2.subrev(subpath)
450 450 except KeyError:
451 451 # A subrepo that existed in node1 was deleted between
452 452 # node1 and node2 (inclusive). Thus, ctx2's substate
453 453 # won't contain that subpath. The best we can do ignore it.
454 454 rev2 = None
455 455 submatch = matchmod.subdirmatcher(subpath, match)
456 456 s = sub.status(
457 457 rev2,
458 458 match=submatch,
459 459 ignored=listignored,
460 460 clean=listclean,
461 461 unknown=listunknown,
462 462 listsubrepos=True,
463 463 )
464 464 for k in (
465 465 'modified',
466 466 'added',
467 467 'removed',
468 468 'deleted',
469 469 'unknown',
470 470 'ignored',
471 471 'clean',
472 472 ):
473 473 rfiles, sfiles = getattr(r, k), getattr(s, k)
474 474 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
475 475
476 476 r.modified.sort()
477 477 r.added.sort()
478 478 r.removed.sort()
479 479 r.deleted.sort()
480 480 r.unknown.sort()
481 481 r.ignored.sort()
482 482 r.clean.sort()
483 483
484 484 return r
485 485
486 486 def mergestate(self, clean=False):
487 487 """Get a mergestate object for this context."""
488 488 raise NotImplementedError(
489 489 '%s does not implement mergestate()' % self.__class__
490 490 )
491 491
492 492 def isempty(self):
493 493 return not (
494 494 len(self.parents()) > 1
495 495 or self.branch() != self.p1().branch()
496 496 or self.closesbranch()
497 497 or self.files()
498 498 )
499 499
500 500
501 501 class changectx(basectx):
502 502 """A changecontext object makes access to data related to a particular
503 503 changeset convenient. It represents a read-only context already present in
504 504 the repo."""
505 505
506 506 def __init__(self, repo, rev, node, maybe_filtered=True):
507 507 super(changectx, self).__init__(repo)
508 508 self._rev = rev
509 509 self._node = node
510 510 # When maybe_filtered is True, the revision might be affected by
511 511 # changelog filtering and operation through the filtered changelog must be used.
512 512 #
513 513 # When maybe_filtered is False, the revision has already been checked
514 514 # against filtering and is not filtered. Operation through the
515 515 # unfiltered changelog might be used in some case.
516 516 self._maybe_filtered = maybe_filtered
517 517
518 518 def __hash__(self):
519 519 try:
520 520 return hash(self._rev)
521 521 except AttributeError:
522 522 return id(self)
523 523
524 524 def __nonzero__(self):
525 525 return self._rev != nullrev
526 526
527 527 __bool__ = __nonzero__
528 528
529 529 @propertycache
530 530 def _changeset(self):
531 531 if self._maybe_filtered:
532 532 repo = self._repo
533 533 else:
534 534 repo = self._repo.unfiltered()
535 535 return repo.changelog.changelogrevision(self.rev())
536 536
537 537 @propertycache
538 538 def _manifest(self):
539 539 return self._manifestctx.read()
540 540
541 541 @property
542 542 def _manifestctx(self):
543 543 return self._repo.manifestlog[self._changeset.manifest]
544 544
545 545 @propertycache
546 546 def _manifestdelta(self):
547 547 return self._manifestctx.readdelta()
548 548
549 549 @propertycache
550 550 def _parents(self):
551 551 repo = self._repo
552 552 if self._maybe_filtered:
553 553 cl = repo.changelog
554 554 else:
555 555 cl = repo.unfiltered().changelog
556 556
557 557 p1, p2 = cl.parentrevs(self._rev)
558 558 if p2 == nullrev:
559 559 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
560 560 return [
561 561 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
562 562 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
563 563 ]
564 564
565 565 def changeset(self):
566 566 c = self._changeset
567 567 return (
568 568 c.manifest,
569 569 c.user,
570 570 c.date,
571 571 c.files,
572 572 c.description,
573 573 c.extra,
574 574 )
575 575
576 576 def manifestnode(self):
577 577 return self._changeset.manifest
578 578
579 579 def user(self):
580 580 return self._changeset.user
581 581
582 582 def date(self):
583 583 return self._changeset.date
584 584
585 585 def files(self):
586 586 return self._changeset.files
587 587
588 588 def filesmodified(self):
589 589 modified = set(self.files())
590 590 modified.difference_update(self.filesadded())
591 591 modified.difference_update(self.filesremoved())
592 592 return sorted(modified)
593 593
594 594 def filesadded(self):
595 595 filesadded = self._changeset.filesadded
596 596 compute_on_none = True
597 597 if self._repo.filecopiesmode == b'changeset-sidedata':
598 598 compute_on_none = False
599 599 else:
600 600 source = self._repo.ui.config(b'experimental', b'copies.read-from')
601 601 if source == b'changeset-only':
602 602 compute_on_none = False
603 603 elif source != b'compatibility':
604 604 # filelog mode, ignore any changelog content
605 605 filesadded = None
606 606 if filesadded is None:
607 607 if compute_on_none:
608 608 filesadded = metadata.computechangesetfilesadded(self)
609 609 else:
610 610 filesadded = []
611 611 return filesadded
612 612
613 613 def filesremoved(self):
614 614 filesremoved = self._changeset.filesremoved
615 615 compute_on_none = True
616 616 if self._repo.filecopiesmode == b'changeset-sidedata':
617 617 compute_on_none = False
618 618 else:
619 619 source = self._repo.ui.config(b'experimental', b'copies.read-from')
620 620 if source == b'changeset-only':
621 621 compute_on_none = False
622 622 elif source != b'compatibility':
623 623 # filelog mode, ignore any changelog content
624 624 filesremoved = None
625 625 if filesremoved is None:
626 626 if compute_on_none:
627 627 filesremoved = metadata.computechangesetfilesremoved(self)
628 628 else:
629 629 filesremoved = []
630 630 return filesremoved
631 631
632 632 @propertycache
633 633 def _copies(self):
634 634 p1copies = self._changeset.p1copies
635 635 p2copies = self._changeset.p2copies
636 636 compute_on_none = True
637 637 if self._repo.filecopiesmode == b'changeset-sidedata':
638 638 compute_on_none = False
639 639 else:
640 640 source = self._repo.ui.config(b'experimental', b'copies.read-from')
641 641 # If config says to get copy metadata only from changeset, then
642 642 # return that, defaulting to {} if there was no copy metadata. In
643 643 # compatibility mode, we return copy data from the changeset if it
644 644 # was recorded there, and otherwise we fall back to getting it from
645 645 # the filelogs (below).
646 646 #
647 647 # If we are in compatiblity mode and there is not data in the
648 648 # changeset), we get the copy metadata from the filelogs.
649 649 #
650 650 # otherwise, when config said to read only from filelog, we get the
651 651 # copy metadata from the filelogs.
652 652 if source == b'changeset-only':
653 653 compute_on_none = False
654 654 elif source != b'compatibility':
655 655 # filelog mode, ignore any changelog content
656 656 p1copies = p2copies = None
657 657 if p1copies is None:
658 658 if compute_on_none:
659 659 p1copies, p2copies = super(changectx, self)._copies
660 660 else:
661 661 if p1copies is None:
662 662 p1copies = {}
663 663 if p2copies is None:
664 664 p2copies = {}
665 665 return p1copies, p2copies
666 666
667 667 def description(self):
668 668 return self._changeset.description
669 669
670 670 def branch(self):
671 671 return encoding.tolocal(self._changeset.extra.get(b"branch"))
672 672
673 673 def closesbranch(self):
674 674 return b'close' in self._changeset.extra
675 675
676 676 def extra(self):
677 677 """Return a dict of extra information."""
678 678 return self._changeset.extra
679 679
680 680 def tags(self):
681 681 """Return a list of byte tag names"""
682 682 return self._repo.nodetags(self._node)
683 683
684 684 def bookmarks(self):
685 685 """Return a list of byte bookmark names."""
686 686 return self._repo.nodebookmarks(self._node)
687 687
688 688 def phase(self):
689 689 return self._repo._phasecache.phase(self._repo, self._rev)
690 690
691 691 def hidden(self):
692 692 return self._rev in repoview.filterrevs(self._repo, b'visible')
693 693
694 694 def isinmemory(self):
695 695 return False
696 696
697 697 def children(self):
698 698 """return list of changectx contexts for each child changeset.
699 699
700 700 This returns only the immediate child changesets. Use descendants() to
701 701 recursively walk children.
702 702 """
703 703 c = self._repo.changelog.children(self._node)
704 704 return [self._repo[x] for x in c]
705 705
706 706 def ancestors(self):
707 707 for a in self._repo.changelog.ancestors([self._rev]):
708 708 yield self._repo[a]
709 709
710 710 def descendants(self):
711 711 """Recursively yield all children of the changeset.
712 712
713 713 For just the immediate children, use children()
714 714 """
715 715 for d in self._repo.changelog.descendants([self._rev]):
716 716 yield self._repo[d]
717 717
718 718 def filectx(self, path, fileid=None, filelog=None):
719 719 """get a file context from this changeset"""
720 720 if fileid is None:
721 721 fileid = self.filenode(path)
722 722 return filectx(
723 723 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
724 724 )
725 725
726 726 def ancestor(self, c2, warn=False):
727 727 """return the "best" ancestor context of self and c2
728 728
729 729 If there are multiple candidates, it will show a message and check
730 730 merge.preferancestor configuration before falling back to the
731 731 revlog ancestor."""
732 732 # deal with workingctxs
733 733 n2 = c2._node
734 734 if n2 is None:
735 735 n2 = c2._parents[0]._node
736 736 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
737 737 if not cahs:
738 738 anc = self._repo.nodeconstants.nullid
739 739 elif len(cahs) == 1:
740 740 anc = cahs[0]
741 741 else:
742 742 # experimental config: merge.preferancestor
743 743 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
744 744 try:
745 745 ctx = scmutil.revsymbol(self._repo, r)
746 746 except error.RepoLookupError:
747 747 continue
748 748 anc = ctx.node()
749 749 if anc in cahs:
750 750 break
751 751 else:
752 752 anc = self._repo.changelog.ancestor(self._node, n2)
753 753 if warn:
754 754 self._repo.ui.status(
755 755 (
756 756 _(b"note: using %s as ancestor of %s and %s\n")
757 757 % (short(anc), short(self._node), short(n2))
758 758 )
759 759 + b''.join(
760 760 _(
761 761 b" alternatively, use --config "
762 762 b"merge.preferancestor=%s\n"
763 763 )
764 764 % short(n)
765 765 for n in sorted(cahs)
766 766 if n != anc
767 767 )
768 768 )
769 769 return self._repo[anc]
770 770
771 771 def isancestorof(self, other):
772 772 """True if this changeset is an ancestor of other"""
773 773 return self._repo.changelog.isancestorrev(self._rev, other._rev)
774 774
775 775 def walk(self, match):
776 776 '''Generates matching file names.'''
777 777
778 778 # Wrap match.bad method to have message with nodeid
779 779 def bad(fn, msg):
780 780 # The manifest doesn't know about subrepos, so don't complain about
781 781 # paths into valid subrepos.
782 782 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
783 783 return
784 784 match.bad(fn, _(b'no such file in rev %s') % self)
785 785
786 786 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
787 787 return self._manifest.walk(m)
788 788
789 789 def matches(self, match):
790 790 return self.walk(match)
791 791
792 792
793 793 class basefilectx(object):
794 794 """A filecontext object represents the common logic for its children:
795 795 filectx: read-only access to a filerevision that is already present
796 796 in the repo,
797 797 workingfilectx: a filecontext that represents files from the working
798 798 directory,
799 799 memfilectx: a filecontext that represents files in-memory,
800 800 """
801 801
802 802 @propertycache
803 803 def _filelog(self):
804 804 return self._repo.file(self._path)
805 805
806 806 @propertycache
807 807 def _changeid(self):
808 808 if '_changectx' in self.__dict__:
809 809 return self._changectx.rev()
810 810 elif '_descendantrev' in self.__dict__:
811 811 # this file context was created from a revision with a known
812 812 # descendant, we can (lazily) correct for linkrev aliases
813 813 return self._adjustlinkrev(self._descendantrev)
814 814 else:
815 815 return self._filelog.linkrev(self._filerev)
816 816
817 817 @propertycache
818 818 def _filenode(self):
819 819 if '_fileid' in self.__dict__:
820 820 return self._filelog.lookup(self._fileid)
821 821 else:
822 822 return self._changectx.filenode(self._path)
823 823
824 824 @propertycache
825 825 def _filerev(self):
826 826 return self._filelog.rev(self._filenode)
827 827
828 828 @propertycache
829 829 def _repopath(self):
830 830 return self._path
831 831
832 832 def __nonzero__(self):
833 833 try:
834 834 self._filenode
835 835 return True
836 836 except error.LookupError:
837 837 # file is missing
838 838 return False
839 839
840 840 __bool__ = __nonzero__
841 841
842 842 def __bytes__(self):
843 843 try:
844 844 return b"%s@%s" % (self.path(), self._changectx)
845 845 except error.LookupError:
846 846 return b"%s@???" % self.path()
847 847
848 848 __str__ = encoding.strmethod(__bytes__)
849 849
850 850 def __repr__(self):
851 851 return "<%s %s>" % (type(self).__name__, str(self))
852 852
853 853 def __hash__(self):
854 854 try:
855 855 return hash((self._path, self._filenode))
856 856 except AttributeError:
857 857 return id(self)
858 858
859 859 def __eq__(self, other):
860 860 try:
861 861 return (
862 862 type(self) == type(other)
863 863 and self._path == other._path
864 864 and self._filenode == other._filenode
865 865 )
866 866 except AttributeError:
867 867 return False
868 868
869 869 def __ne__(self, other):
870 870 return not (self == other)
871 871
872 872 def filerev(self):
873 873 return self._filerev
874 874
875 875 def filenode(self):
876 876 return self._filenode
877 877
878 878 @propertycache
879 879 def _flags(self):
880 880 return self._changectx.flags(self._path)
881 881
882 882 def flags(self):
883 883 return self._flags
884 884
885 885 def filelog(self):
886 886 return self._filelog
887 887
888 888 def rev(self):
889 889 return self._changeid
890 890
891 891 def linkrev(self):
892 892 return self._filelog.linkrev(self._filerev)
893 893
894 894 def node(self):
895 895 return self._changectx.node()
896 896
897 897 def hex(self):
898 898 return self._changectx.hex()
899 899
900 900 def user(self):
901 901 return self._changectx.user()
902 902
903 903 def date(self):
904 904 return self._changectx.date()
905 905
906 906 def files(self):
907 907 return self._changectx.files()
908 908
909 909 def description(self):
910 910 return self._changectx.description()
911 911
912 912 def branch(self):
913 913 return self._changectx.branch()
914 914
915 915 def extra(self):
916 916 return self._changectx.extra()
917 917
918 918 def phase(self):
919 919 return self._changectx.phase()
920 920
921 921 def phasestr(self):
922 922 return self._changectx.phasestr()
923 923
924 924 def obsolete(self):
925 925 return self._changectx.obsolete()
926 926
927 927 def instabilities(self):
928 928 return self._changectx.instabilities()
929 929
930 930 def manifest(self):
931 931 return self._changectx.manifest()
932 932
933 933 def changectx(self):
934 934 return self._changectx
935 935
936 936 def renamed(self):
937 937 return self._copied
938 938
939 939 def copysource(self):
940 940 return self._copied and self._copied[0]
941 941
942 942 def repo(self):
943 943 return self._repo
944 944
945 945 def size(self):
946 946 return len(self.data())
947 947
948 948 def path(self):
949 949 return self._path
950 950
951 951 def isbinary(self):
952 952 try:
953 953 return stringutil.binary(self.data())
954 954 except IOError:
955 955 return False
956 956
957 957 def isexec(self):
958 958 return b'x' in self.flags()
959 959
960 960 def islink(self):
961 961 return b'l' in self.flags()
962 962
963 963 def isabsent(self):
964 964 """whether this filectx represents a file not in self._changectx
965 965
966 966 This is mainly for merge code to detect change/delete conflicts. This is
967 967 expected to be True for all subclasses of basectx."""
968 968 return False
969 969
970 970 _customcmp = False
971 971
972 972 def cmp(self, fctx):
973 973 """compare with other file context
974 974
975 975 returns True if different than fctx.
976 976 """
977 977 if fctx._customcmp:
978 978 return fctx.cmp(self)
979 979
980 980 if self._filenode is None:
981 981 raise error.ProgrammingError(
982 982 b'filectx.cmp() must be reimplemented if not backed by revlog'
983 983 )
984 984
985 985 if fctx._filenode is None:
986 986 if self._repo._encodefilterpats:
987 987 # can't rely on size() because wdir content may be decoded
988 988 return self._filelog.cmp(self._filenode, fctx.data())
989 989 if self.size() - 4 == fctx.size():
990 990 # size() can match:
991 991 # if file data starts with '\1\n', empty metadata block is
992 992 # prepended, which adds 4 bytes to filelog.size().
993 993 return self._filelog.cmp(self._filenode, fctx.data())
994 994 if self.size() == fctx.size() or self.flags() == b'l':
995 995 # size() matches: need to compare content
996 996 # issue6456: Always compare symlinks because size can represent
997 997 # encrypted string for EXT-4 encryption(fscrypt).
998 998 return self._filelog.cmp(self._filenode, fctx.data())
999 999
1000 1000 # size() differs
1001 1001 return True
1002 1002
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # The source is exactly the linkrev: nothing to adjust.
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # Walked below the requested floor without finding an
                    # introduction: give up early.
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
1055 1055
1056 1056 def isintroducedafter(self, changelogrev):
1057 1057 """True if a filectx has been introduced after a given floor revision"""
1058 1058 if self.linkrev() >= changelogrev:
1059 1059 return True
1060 1060 introrev = self._introrev(stoprev=changelogrev)
1061 1061 if introrev is None:
1062 1062 return False
1063 1063 return introrev >= changelogrev
1064 1064
1065 1065 def introrev(self):
1066 1066 """return the rev of the changeset which introduced this file revision
1067 1067
1068 1068 This method is different from linkrev because it take into account the
1069 1069 changeset the filectx was created from. It ensures the returned
1070 1070 revision is one of its ancestors. This prevents bugs from
1071 1071 'linkrev-shadowing' when a file revision is used by multiple
1072 1072 changesets.
1073 1073 """
1074 1074 return self._introrev()
1075 1075
1076 1076 def _introrev(self, stoprev=None):
1077 1077 """
1078 1078 Same as `introrev` but, with an extra argument to limit changelog
1079 1079 iteration range in some internal usecase.
1080 1080
1081 1081 If `stoprev` is set, the `introrev` will not be searched past that
1082 1082 `stoprev` revision and "None" might be returned. This is useful to
1083 1083 limit the iteration range.
1084 1084 """
1085 1085 toprev = None
1086 1086 attrs = vars(self)
1087 1087 if '_changeid' in attrs:
1088 1088 # We have a cached value already
1089 1089 toprev = self._changeid
1090 1090 elif '_changectx' in attrs:
1091 1091 # We know which changelog entry we are coming from
1092 1092 toprev = self._changectx.rev()
1093 1093
1094 1094 if toprev is not None:
1095 1095 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1096 1096 elif '_descendantrev' in attrs:
1097 1097 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1098 1098 # be nice and cache the result of the computation
1099 1099 if introrev is not None:
1100 1100 self._changeid = introrev
1101 1101 return introrev
1102 1102 else:
1103 1103 return self.linkrev()
1104 1104
1105 1105 def introfilectx(self):
1106 1106 """Return filectx having identical contents, but pointing to the
1107 1107 changeset revision where this filectx was introduced"""
1108 1108 introrev = self.introrev()
1109 1109 if self.rev() == introrev:
1110 1110 return self
1111 1111 return self.filectx(self.filenode(), changeid=introrev)
1112 1112
1113 1113 def _parentfilectx(self, path, fileid, filelog):
1114 1114 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1115 1115 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1116 1116 if '_changeid' in vars(self) or '_changectx' in vars(self):
1117 1117 # If self is associated with a changeset (probably explicitly
1118 1118 # fed), ensure the created filectx is associated with a
1119 1119 # changeset that is an ancestor of self.changectx.
1120 1120 # This lets us later use _adjustlinkrev to get a correct link.
1121 1121 fctx._descendantrev = self.rev()
1122 1122 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1123 1123 elif '_descendantrev' in vars(self):
1124 1124 # Otherwise propagate _descendantrev if we have one associated.
1125 1125 fctx._descendantrev = self._descendantrev
1126 1126 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1127 1127 return fctx
1128 1128
1129 1129 def parents(self):
1130 1130 _path = self._path
1131 1131 fl = self._filelog
1132 1132 parents = self._filelog.parents(self._filenode)
1133 1133 pl = [
1134 1134 (_path, node, fl)
1135 1135 for node in parents
1136 1136 if node != self._repo.nodeconstants.nullid
1137 1137 ]
1138 1138
1139 1139 r = fl.renamed(self._filenode)
1140 1140 if r:
1141 1141 # - In the simple rename case, both parent are nullid, pl is empty.
1142 1142 # - In case of merge, only one of the parent is null id and should
1143 1143 # be replaced with the rename information. This parent is -always-
1144 1144 # the first one.
1145 1145 #
1146 1146 # As null id have always been filtered out in the previous list
1147 1147 # comprehension, inserting to 0 will always result in "replacing
1148 1148 # first nullid parent with rename information.
1149 1149 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1150 1150
1151 1151 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1152 1152
1153 1153 def p1(self):
1154 1154 return self.parents()[0]
1155 1155
1156 1156 def p2(self):
1157 1157 p = self.parents()
1158 1158 if len(p) == 2:
1159 1159 return p[1]
1160 1160 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1161 1161
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            # (the bare attribute access forces the cached computation)
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )
1211 1211
1212 1212 def ancestors(self, followfirst=False):
1213 1213 visit = {}
1214 1214 c = self
1215 1215 if followfirst:
1216 1216 cut = 1
1217 1217 else:
1218 1218 cut = None
1219 1219
1220 1220 while True:
1221 1221 for parent in c.parents()[:cut]:
1222 1222 visit[(parent.linkrev(), parent.filenode())] = parent
1223 1223 if not visit:
1224 1224 break
1225 1225 c = visit.pop(max(visit))
1226 1226 yield c
1227 1227
1228 1228 def decodeddata(self):
1229 1229 """Returns `data()` after running repository decoding filters.
1230 1230
1231 1231 This is often equivalent to how the data would be expressed on disk.
1232 1232 """
1233 1233 return self._repo.wwritedata(self.path(), self.data())
1234 1234
1235 1235
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one anchor is required to resolve the file revision.
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        if filelog is not None:
            self._filelog = filelog

        # Only pre-seed the propertycache slots we were given explicitly;
        # everything else is computed lazily on first access.
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # Lazily resolve the changectx from the cached changeset id.
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        # Revision data as stored in the revlog, without interpretation.
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # Censored content either reads as empty or aborts, depending
            # on the configured censor policy.
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # A parent already holds this exact file revision: not a
                    # new copy in this changeset.
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1360 1360
1361 1361
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        """Initialize a committable context.

        text - commit message
        user - username string, or None to resolve lazily from the ui
        date - any valid date string or (unixtime, offset), or None
        extra - a dictionary of extra values, or None
        changes - a status object, or None to compute status lazily
        branch - branch to record, or None to use ``extra``'s value
        """
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        # Only seed the propertycache slots for which we got explicit
        # values; the rest are computed on first access.
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    def hex(self):
        # Fix: the previous implementation computed the working-directory
        # pseudo hash but never returned it (missing ``return``), so callers
        # silently received None.
        return self._repo.nodeconstants.wdirhex

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # Default to full repository status when none was supplied.
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # Allow tests and tooling to pin the commit date via config.
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Sorted list of files touched (modified + added + removed)."""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # Aliases used by commit metadata computation.
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # An uncommitted context carries the bookmarks of all its parents.
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # Never use a phase lower than any parent's phase.
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1530 1530
1531 1531 class workingctx(committablectx):
1532 1532 """A workingctx object makes access to data related to
1533 1533 the current working directory convenient.
1534 1534 date - any valid date string or (unixtime, offset), or None.
1535 1535 user - username string, or None.
1536 1536 extra - a dictionary of extra values, or None.
1537 1537 changes - a list of file lists as returned by localrepo.status()
1538 1538 or None to use the repository status.
1539 1539 """
1540 1540
1541 1541 def __init__(
1542 1542 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1543 1543 ):
1544 1544 branch = None
1545 1545 if not extra or b'branch' not in extra:
1546 1546 try:
1547 1547 branch = repo.dirstate.branch()
1548 1548 except UnicodeDecodeError:
1549 1549 raise error.Abort(_(b'branch name not in UTF-8!'))
1550 1550 super(workingctx, self).__init__(
1551 1551 repo, text, user, date, extra, changes, branch=branch
1552 1552 )
1553 1553
1554 1554 def __iter__(self):
1555 1555 d = self._repo.dirstate
1556 1556 for f in d:
1557 1557 if d.get_entry(f).tracked:
1558 1558 yield f
1559 1559
1560 1560 def __contains__(self, key):
1561 1561 return self._repo.dirstate.get_entry(key).tracked
1562 1562
1563 1563 def hex(self):
1564 1564 return self._repo.nodeconstants.wdirhex
1565 1565
1566 1566 @propertycache
1567 1567 def _parents(self):
1568 1568 p = self._repo.dirstate.parents()
1569 1569 if p[1] == self._repo.nodeconstants.nullid:
1570 1570 p = p[:-1]
1571 1571 # use unfiltered repo to delay/avoid loading obsmarkers
1572 1572 unfi = self._repo.unfiltered()
1573 1573 return [
1574 1574 changectx(
1575 1575 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1576 1576 )
1577 1577 for n in p
1578 1578 ]
1579 1579
    def setparents(self, p1node, p2node=None):
        """Set the working directory parents to the given nodes, adjusting
        dirstate copy records along the way (the dirstate cannot adjust
        them itself because that requires the parents' manifests)."""
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == self._repo.nodeconstants.nullid:
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        # Neither side of the copy record survives in the
                        # new (single) parent: drop the record.
                        dirstate.copy(None, f)
1598 1598
1599 1599 def _fileinfo(self, path):
1600 1600 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1601 1601 self._manifest
1602 1602 return super(workingctx, self)._fileinfo(path)
1603 1603
    def _buildflagfunc(self):
        """Build a fallback flags(path) callable for filesystems that cannot
        represent exec/symlink bits, reconstructing flags from the parent
        manifest(s)."""
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                # Agreeing parents win; otherwise prefer whichever side
                # changed relative to the ancestor.
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return b''  # punt for conflicts

        return func
1637 1637
1638 1638 @propertycache
1639 1639 def _flagfunc(self):
1640 1640 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1641 1641
1642 1642 def flags(self, path):
1643 1643 try:
1644 1644 return self._flagfunc(path)
1645 1645 except OSError:
1646 1646 return b''
1647 1647
1648 1648 def filectx(self, path, filelog=None):
1649 1649 """get a file context from the working directory"""
1650 1650 return workingfilectx(
1651 1651 self._repo, path, workingctx=self, filelog=filelog
1652 1652 )
1653 1653
1654 1654 def dirty(self, missing=False, merge=True, branch=True):
1655 1655 """check whether a working directory is modified"""
1656 1656 # check subrepos first
1657 1657 for s in sorted(self.substate):
1658 1658 if self.sub(s).dirty(missing=missing):
1659 1659 return True
1660 1660 # check current working dir
1661 1661 return (
1662 1662 (merge and self.p2())
1663 1663 or (branch and self.branch() != self.p1().branch())
1664 1664 or self.modified()
1665 1665 or self.added()
1666 1666 or self.removed()
1667 1667 or (missing and self.deleted())
1668 1668 )
1669 1669
    def add(self, list, prefix=b""):
        """Start tracking the given files; returns the rejected subset
        (missing files or unsupported file types)."""
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    # Large files are warned about but still added.
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif not ds.set_tracked(f):
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
            return rejected
1710 1710
1711 1711 def forget(self, files, prefix=b""):
1712 1712 with self._repo.wlock():
1713 1713 ds = self._repo.dirstate
1714 1714 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1715 1715 rejected = []
1716 1716 for f in files:
1717 1717 if not ds.set_untracked(f):
1718 1718 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1719 1719 rejected.append(f)
1720 1720 return rejected
1721 1721
    def copy(self, source, dest):
        """Record ``dest`` as a copy of ``source`` in the dirstate.

        ``dest`` must already exist in the working directory as a regular
        file or symlink; otherwise only a warning is emitted.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # A missing destination is the only expected failure here.
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                ds.set_tracked(dest)
                ds.copy(source, dest)
1742 1742
1743 1743 def match(
1744 1744 self,
1745 1745 pats=None,
1746 1746 include=None,
1747 1747 exclude=None,
1748 1748 default=b'glob',
1749 1749 listsubrepos=False,
1750 1750 badfn=None,
1751 1751 cwd=None,
1752 1752 ):
1753 1753 r = self._repo
1754 1754 if not cwd:
1755 1755 cwd = r.getcwd()
1756 1756
1757 1757 # Only a case insensitive filesystem needs magic to translate user input
1758 1758 # to actual case in the filesystem.
1759 1759 icasefs = not util.fscasesensitive(r.root)
1760 1760 return matchmod.match(
1761 1761 r.root,
1762 1762 cwd,
1763 1763 pats,
1764 1764 include,
1765 1765 exclude,
1766 1766 default,
1767 1767 auditor=r.auditor,
1768 1768 ctx=self,
1769 1769 listsubrepos=listsubrepos,
1770 1770 badfn=badfn,
1771 1771 icasefs=icasefs,
1772 1772 )
1773 1773
1774 1774 def _filtersuspectsymlink(self, files):
1775 1775 if not files or self._repo.dirstate._checklink:
1776 1776 return files
1777 1777
1778 1778 # Symlink placeholders may get non-symlink-like contents
1779 1779 # via user error or dereferencing by NFS or Samba servers,
1780 1780 # so we filter out any placeholders that don't look like a
1781 1781 # symlink
1782 1782 sane = []
1783 1783 for f in files:
1784 1784 if self.flags(f) == b'l':
1785 1785 d = self[f].data()
1786 1786 if (
1787 1787 d == b''
1788 1788 or len(d) >= 1024
1789 1789 or b'\n' in d
1790 1790 or stringutil.binary(d)
1791 1791 ):
1792 1792 self._repo.ui.debug(
1793 1793 b'ignoring suspect symlink placeholder "%s"\n' % f
1794 1794 )
1795 1795 continue
1796 1796 sane.append(f)
1797 1797 return sane
1798 1798
1799 1799 def _checklookup(self, files, mtime_boundary):
1800 1800 # check for any possibly clean files
1801 1801 if not files:
1802 1802 return [], [], [], []
1803 1803
1804 1804 modified = []
1805 1805 deleted = []
1806 1806 clean = []
1807 1807 fixup = []
1808 1808 pctx = self._parents[0]
1809 1809 # do a full compare of any files that might have changed
1810 1810 for f in sorted(files):
1811 1811 try:
1812 1812 # This will return True for a file that got replaced by a
1813 1813 # directory in the interim, but fixing that is pretty hard.
1814 1814 if (
1815 1815 f not in pctx
1816 1816 or self.flags(f) != pctx.flags(f)
1817 1817 or pctx[f].cmp(self[f])
1818 1818 ):
1819 1819 modified.append(f)
1820 1820 elif mtime_boundary is None:
1821 1821 clean.append(f)
1822 1822 else:
1823 1823 s = self[f].lstat()
1824 1824 mode = s.st_mode
1825 1825 size = s.st_size
1826 file_mtime = timestamp.mtime_of(s)
1827 cache_info = (mode, size, file_mtime)
1828
1829 file_second = file_mtime[0]
1830 boundary_second = mtime_boundary[0]
1831 # If the mtime of the ambiguous file is younger (or equal)
1832 # to the starting point of the `status` walk, we cannot
1833 # garantee that another, racy, write will not happen right
1834 # after with the same mtime and we cannot cache the
1835 # information.
1836 #
1837 # However is the mtime is far away in the future, this is
1838 # likely some mismatch between the current clock and
1839 # previous file system operation. So mtime more than one days
1840 # in the future are considered fine.
1841 if (
1842 boundary_second
1843 <= file_second
1844 < (3600 * 24 + boundary_second)
1845 ):
1826 file_mtime = timestamp.reliable_mtime_of(s, mtime_boundary)
1827 if file_mtime is not None:
1828 cache_info = (mode, size, file_mtime)
1829 fixup.append((f, cache_info))
1830 else:
1846 1831 clean.append(f)
1847 else:
1848 fixup.append((f, cache_info))
1849 1832 except (IOError, OSError):
1850 1833 # A file become inaccessible in between? Mark it as deleted,
1851 1834 # matching dirstate behavior (issue5584).
1852 1835 # The dirstate has more complex behavior around whether a
1853 1836 # missing file matches a directory, etc, but we don't need to
1854 1837 # bother with that: if f has made it to this point, we're sure
1855 1838 # it's in the dirstate.
1856 1839 deleted.append(f)
1857 1840
1858 1841 return modified, deleted, clean, fixup
1859 1842
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus or self._repo.dirstate._dirty:
            try:
                # Remember the dirstate identity so we can detect a
                # concurrent rewrite after we take the lock.
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    dirstate = self._repo.dirstate
                    if dirstate.identity() == oldid:
                        if fixup:
                            if dirstate.pendingparentchange():
                                normal = lambda f, pfd: dirstate.update_file(
                                    f, p1_tracked=True, wc_tracked=True
                                )
                            else:
                                normal = dirstate.set_clean
                            for f, pdf in fixup:
                                normal(f, pdf)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1906 1889
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # cmp holds the files the dirstate could not decide about ("lookup").
        cmp, s, mtime_boundary = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, clean_set, fixup = self._checklookup(
                cmp, mtime_boundary
            )
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if clean_set and clean:
                s.clean.extend(clean_set)
            if fixup and clean:
                # fixup entries are clean too; only the filenames matter here.
                s.clean.extend((f for f, _ in fixup))

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1943 1926
1944 1927 @propertycache
1945 1928 def _copies(self):
1946 1929 p1copies = {}
1947 1930 p2copies = {}
1948 1931 parents = self._repo.dirstate.parents()
1949 1932 p1manifest = self._repo[parents[0]].manifest()
1950 1933 p2manifest = self._repo[parents[1]].manifest()
1951 1934 changedset = set(self.added()) | set(self.modified())
1952 1935 narrowmatch = self._repo.narrowmatch()
1953 1936 for dst, src in self._repo.dirstate.copies().items():
1954 1937 if dst not in changedset or not narrowmatch(dst):
1955 1938 continue
1956 1939 if src in p1manifest:
1957 1940 p1copies[dst] = src
1958 1941 elif src in p2manifest:
1959 1942 p2copies[dst] = src
1960 1943 return p1copies, p2copies
1961 1944
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1972 1955
1973 1956 def _buildstatusmanifest(self, status):
1974 1957 """Builds a manifest that includes the given status results."""
1975 1958 parents = self.parents()
1976 1959
1977 1960 man = parents[0].manifest().copy()
1978 1961
1979 1962 ff = self._flagfunc
1980 1963 for i, l in (
1981 1964 (self._repo.nodeconstants.addednodeid, status.added),
1982 1965 (self._repo.nodeconstants.modifiednodeid, status.modified),
1983 1966 ):
1984 1967 for f in l:
1985 1968 man[f] = i
1986 1969 try:
1987 1970 man.setflag(f, ff(f))
1988 1971 except OSError:
1989 1972 pass
1990 1973
1991 1974 for f in status.deleted + status.removed:
1992 1975 if f in man:
1993 1976 del man[f]
1994 1977
1995 1978 return man
1996 1979
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # the passed-in ``s`` is deliberately replaced: the dirstate status
        # is always recomputed here, and the manifest-based comparison below
        # is only layered on top when ``other`` is not the first parent
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
2017 2000
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.

        Note: the passed-in ``match`` object is mutated in place (its ``bad``
        callback is replaced) and then returned.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            match.bad = bad
        return match
2040 2023
2041 2024 def walk(self, match):
2042 2025 '''Generates matching file names.'''
2043 2026 return sorted(
2044 2027 self._repo.dirstate.walk(
2045 2028 self._repo.narrowmatch(match),
2046 2029 subrepos=sorted(self.substate),
2047 2030 unknown=True,
2048 2031 ignored=False,
2049 2032 )
2050 2033 )
2051 2034
2052 2035 def matches(self, match):
2053 2036 match = self._repo.narrowmatch(match)
2054 2037 ds = self._repo.dirstate
2055 2038 return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)
2056 2039
    def markcommitted(self, node):
        # Update the dirstate to reflect that this working copy state has
        # just been committed as ``node``: modified/added files become
        # cleanly tracked, removed files are dropped, and ``node`` becomes
        # the new first parent.
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.update_file(
                    f, p1_tracked=True, wc_tracked=True
                )
            for f in self.removed():
                self._repo.dirstate.update_file(
                    f, p1_tracked=False, wc_tracked=False
                )
            self._repo.dirstate.setparents(node)
            self._repo._quick_access_changeid_invalidate()

        sparse.aftercommit(self._repo, node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
2076 2059
2077 2060 def mergestate(self, clean=False):
2078 2061 if clean:
2079 2062 return mergestatemod.mergestate.clean(self._repo)
2080 2063 return mergestatemod.mergestate.read(self._repo)
2081 2064
2082 2065
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        # filled in lazily; a committable file has no revision yet
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return ``(source path, source filenode in p1)`` if this file is a
        copy/rename, or None otherwise.

        The filenode falls back to nullid when the source is not in p1's
        manifest.
        """
        path = self.copysource()
        if not path:
            return None
        return (
            path,
            self._changectx._parents[0]._manifest.get(
                path, self._repo.nodeconstants.nullid
            ),
        )

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            return ctx._manifest.get(path, self._repo.nodeconstants.nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        # for a renamed file, the first parent is the copy source (its
        # filelog is unknown here, hence the trailing None); otherwise it
        # is the same path in p1
        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents where the file does not exist (nullid filenode)
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != self._repo.nodeconstants.nullid
        ]

    def children(self):
        # a not-yet-committed file has no committed children
        return []
2145 2128
2146 2129
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # default to a fresh context for the whole working directory
        return workingctx(self._repo)

    def data(self):
        """Return the on-disk content of this file."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the copy source recorded in the dirstate, if any."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        # lstat: do not follow symlinks; a link's size is its target path
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return ``(mtime, tz)`` for this file.

        The mtime comes from the filesystem; the timezone from the
        changeset.  Falls back to the changeset date if the file is gone.
        """
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        # follows symlinks
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # does not follow symlinks
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            # also remove any file/link occupying a parent directory name
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # l: symlink flag, x: executable flag
        self._repo.wvfs.setflags(self._path, l, x)
2239 2222
2240 2223
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # start with an empty overlay
        self.clean()

    def setbase(self, wrappedctx):
        # the wrapped context supplies all data not present in the cache
        # and becomes our first parent
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=None):
        # p1 must stay the wrapped context; only p2 may be (re)set
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        """Return the file content, preferring the cache over the wrapped
        context."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # apply the cached overlay (adds/modifications/removals) on top of
        # p1's manifest
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = self._repo.nodeconstants.addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = self._repo.nodeconstants.modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # flags are only queried for cached (dirty) paths
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        """Return the sorted list of all paths touched by this overlay."""
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # cached as existing AND present in the wrapped parent
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # cached as existing but NOT present in the wrapped parent
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # cached as deleted but present in the wrapped parent
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        """Return the dst -> src copy mapping recorded in the cache."""
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None) # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        # NOTE(review): identical computation to p1copies -- the cache does
        # not distinguish which parent a copy came from; confirm intentional
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None) # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        """Return the file's date from the cache, or the wrapped context."""
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        # record the copy without altering content/date/flags
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        """Return the recorded copy source for *path*, if any."""
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        """Return the file's flags from the cache, or the wrapped context."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        # a cached entry overrides the parent; otherwise defer to p1
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        """Record new content for *path* in the cache (never touches disk)."""
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        # symlink takes precedence over executable; flags are exclusive here
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        """Record the deletion of *path* in the cache."""
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        """Return the file's size from the cache, or the wrapped context."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        # like tomemctx, but reuse the precursor's metadata and record it
        # as the amend source
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        """True if *path* has a cached (overlay) entry."""
        return path in self._cache

    def clean(self):
        """Discard all cached changes and any in-memory merge state."""
        self._mergestate = None
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [
                (
                    self.p1().rev(),
                    scmutil.matchfiles(self.repo(), self._cache.keys()),
                )
            ],
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }
        # the manifest depends on the cache contents
        util.clearcachedproperty(self, b'_manifest')

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )

    def mergestate(self, clean=False):
        """Return the in-memory merge state (a fresh one if *clean*)."""
        if clean or self._mergestate is None:
            self._mergestate = mergestatemod.memmergestate(self._repo)
        return self._mergestate
2658 2641
2659 2642
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        # the owning overlayworkingctx; all operations delegate to it
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # content comparison only; flags are not considered here
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # the overlay context's exists() already follows symlinks
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # nothing to audit for in-memory files
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # backgroundclose is meaningless for in-memory writes and dropped
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no on-disk conflicts can exist for in-memory files
        pass
2714 2697
2715 2698
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            changed = self._changedset
            clean_files = [f for f in self._manifest if f not in changed]
        else:
            clean_files = []
        status = self._status
        modified = [f for f in status.modified if match(f)]
        added = [f for f in status.added if match(f)]
        removed = [f for f in status.removed if match(f)]
        return scmutil.status(
            modified, added, removed, [], [], [], clean_files
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        files = set()
        files.update(self._status.modified)
        files.update(self._status.added)
        files.update(self._status.removed)
        return files
2758 2741
2759 2742
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        # EAFP: the common case is a cache hit
        try:
            return memo[path]
        except KeyError:
            memo[path] = func(repo, memctx, path)
            return memo[path]

    return getfilectx
2775 2758
2776 2759
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        source = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=source,
        )

    return getfilectx
2798 2781
2799 2782
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # the patch removed this file; returning None lets memctx
            # register the deletion
            return None
        link, exe = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=link,
            isexec=exe,
            copysource=copysource,
        )

    return getfilectx
2822 2805
2823 2806
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        # not yet committed: no revision number / node id assigned
        self._rev = None
        self._node = None
        # replace any missing (None) parent with the null node id
        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        # normalize to a sorted, duplicate-free list of touched files
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            # let the user edit the message, and persist it so that it can
            # be recovered if the commit fails
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        # use placeholder node ids: the real ones are only known once the
        # file revisions are actually written out
        for f in self._status.modified:
            man[f] = self._repo.nodeconstants.modifiednodeid

        for f in self._status.added:
            man[f] = self._repo.nodeconstants.addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # tracked by no parent: a new file
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: content is present
                modified.append(f)
            else:
                # filectxfn returned None: the file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

    def parents(self):
        # hide a null second parent, matching changectx behavior
        if self._parents[1].rev() == nullrev:
            return [self._parents[0]]
        return self._parents
2955 2938
class memfilectx(committablefilectx):
    """An in-memory file revision, used when committing through memctx.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if current file was copied in
        the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # encode the file type as a manifest-style flag character
        flags = b''
        if islink:
            flags = b'l'
        elif isexec:
            flags = b'x'
        self._flags = flags
        self._copysource = copysource

    def copysource(self):
        # path this file was copied from, or None
        return self._copysource

    def cmp(self, fctx):
        # True when the contents differ
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
3006 2989
3007 2990
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            # default to reusing the original commit message
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # not yet committed: no revision number / node id assigned
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # pad with null revisions so there are always exactly two parents
        while len(parents) < 2:
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            # let the user edit the message, and persist it so that it can
            # be recovered if the commit fails
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # node id of the manifest reused from the original revision
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file contents are unchanged; delegate to the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # tracked by no parent: a new file
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3116 3099
3117 3100
class arbitraryfilectx(object):
    """Expose filectx-like operations for a file at an arbitrary
    filesystem location, which need not be inside a working directory.
    """

    def __init__(self, path, repo=None):
        # repo may be omitted; contrib/simplemerge instantiates this
        # class without one.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True when this file's content differs from ``fctx``'s."""
        # `filecmp` dereferences symlinks while `cmp` must not, so the
        # fast path below is only valid when neither side is a symlink.
        either_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        if not either_symlink and self._repo and isinstance(
            fctx, workingfilectx
        ):
            # Both sides are plain on-disk files: compare them directly.
            # Note that filecmp returns True for "same", the opposite of
            # our cmp convention (True for "different").
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no manifest flags
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
@@ -1,101 +1,126 b''
1 1 # Copyright Mercurial Contributors
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 from __future__ import absolute_import
7 7
8 8 import functools
9 9 import os
10 10 import stat
11 11
12 12
# seconds are truncated to their lower 31 bits
rangemask = 0x7FFFFFFF


@functools.total_ordering
class timestamp(tuple):
    """A Unix timestamp with optional nanosecond precision, modulo 2**31
    seconds.

    This is a 2-tuple of:

    `truncated_seconds`: seconds since the Unix epoch, keeping only the
    lower 31 bits (``rangemask``)

    `subsecond_nanoseconds`: nanoseconds past `truncated_seconds`.  A
    value of zero means the sub-second precision is unknown; such a
    timestamp compares equal to any other with the same seconds.
    """

    def __new__(cls, value):
        secs, nanos = value
        return super(timestamp, cls).__new__(cls, (secs & rangemask, nanos))

    def __eq__(self, other):
        secs, nanos = self
        other_secs, other_nanos = other
        if secs != other_secs:
            return False
        # zero nanoseconds means "precision unknown" and matches any
        # sub-second count for the same second
        return nanos == other_nanos or nanos == 0 or other_nanos == 0

    def __gt__(self, other):
        secs, nanos = self
        other_secs, other_nanos = other
        if secs != other_secs:
            return secs > other_secs
        if nanos == 0 or other_nanos == 0:
            # unknown precision on either side: considered equal,
            # hence not "greater than"
            return False
        return nanos > other_nanos
56 56
57 57
def get_fs_now(vfs):
    """Return "now" as a `timestamp`, as seen by the filesystem behind `vfs`.

    The filesystem clock is sampled by creating (and then removing) a
    temporary file, so this raises if no temporary file can be created.
    """
    fd, temp_path = vfs.mkstemp()
    try:
        return mtime_of(os.fstat(fd))
    finally:
        os.close(fd)
        vfs.unlink(temp_path)
69 69
70 70
def zero():
    """
    Returns the `timestamp` at the Unix epoch.
    """
    # (0 & rangemask, 0) == (0, 0), so the constructor yields the epoch
    return timestamp((0, 0))
76 76
77 77
def mtime_of(stat_result):
    """Return the modification time of an `os.stat_result`-like object
    as a `timestamp`.
    """
    try:
        # TODO: add this attribute to `osutil.stat` objects,
        # see `mercurial/cext/osutil.c`.
        #
        # This attribute is also not available on Python 2.
        nanos = stat_result.st_mtime_ns
    except AttributeError:
        # https://docs.python.org/2/library/os.html#os.stat_float_times
        # "For compatibility with older Python versions,
        # accessing stat_result as a tuple always returns integers."
        secs = stat_result[stat.ST_MTIME]
        subsec_nanos = 0
    else:
        # split into whole seconds and the nanosecond remainder
        secs, subsec_nanos = divmod(nanos, 10 ** 9)
    return timestamp((secs, subsec_nanos))
102
103
def reliable_mtime_of(stat_result, present_mtime):
    """same as `mtime_of`, but return None if the date might be ambiguous

    A modification time is reliable if it is older than "present_mtime" (or
    sufficiently in the future).

    Otherwise a concurrent modification might happen with the same mtime.
    """
    file_mtime = mtime_of(stat_result)
    file_second = file_mtime[0]
    boundary_second = present_mtime[0]
    # If the mtime of the ambiguous file is younger (or equal) to the starting
    # point of the `status` walk, we cannot guarantee that another, racy,
    # write will not happen right after with the same mtime and we cannot
    # cache the information.
    #
    # However, if the mtime is far away in the future, it is likely some
    # mismatch between the current clock and a previous file system
    # operation. So mtimes more than one day in the future are considered
    # fine.
    if boundary_second <= file_second < (3600 * 24 + boundary_second):
        return None
    else:
        return file_mtime
General Comments 0
You need to be logged in to leave comments. Login now