##// END OF EJS Templates
# Review-page header (scraped): changeset r49204:41f40f35 on branch 'default'
# by marmoute — "status: gather fixup info at comparison time..."
# (parent / child / Browse files / Show More are page navigation links)
# Diff hunk: @@ -1,3123 +1,3140 @@ b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullrev,
19 19 short,
20 20 )
21 21 from .pycompat import (
22 22 getattr,
23 23 open,
24 24 )
25 25 from . import (
26 26 dagop,
27 27 encoding,
28 28 error,
29 29 fileset,
30 30 match as matchmod,
31 31 mergestate as mergestatemod,
32 32 metadata,
33 33 obsolete as obsmod,
34 34 patch,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 scmutil,
40 40 sparse,
41 41 subrepo,
42 42 subrepoutil,
43 43 util,
44 44 )
45 45 from .utils import (
46 46 dateutil,
47 47 stringutil,
48 48 )
49 from .dirstateutils import (
50 timestamp,
51 )
49 52
# Short alias: the many lazily-computed, cached attributes below all use it.
propertycache = util.propertycache
51 54
52 55
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # Short hex of the changeset node, e.g. b'41f40f35'.
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts are equal when they are the same concrete type and point
        # at the same revision; AttributeError covers non-context operands
        # (or contexts without a _rev, e.g. wdir-like contexts).
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # Membership means "tracked file in this revision's manifest".
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> file context for that path in this revision.
        return self.filectx(key)

    def __iter__(self):
        # Iterate over tracked file names (manifest order).
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        # Deleted/unknown/ignored come straight from the precomputed status
        # object 's'; only modified/added/removed/clean are derived here.
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # Subrepository state for this revision, parsed from .hgsub/.hgsubstate.
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Revision recorded for the subrepo at 'subpath' in this changeset.
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        # Human-readable phase name (e.g. b'public', b'draft').
        return phases.phasenames[self.phase()]

    def mutable(self):
        # Anything above the 'public' phase may still be rewritten.
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        # Build a matcher from a fileset expression evaluated in this context.
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # Second parent, or the null context when this is not a merge.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """Return (filenode, flags) for 'path' in this revision.

        Prefers already-materialized data (full manifest, then manifest
        delta) before reading through the manifest log.  Raises
        error.ManifestLookupError when the path is not in the manifest."""
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node or b'None', path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node or b'None', path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # Flags for 'path' (b'x', b'l', ...); b'' when the file is absent.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        # (p1copies, p2copies) dicts computed from changeset metadata.
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        """return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        """
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """Build a matcher for this context from patterns and includes/excludes.

        'cwd' defaults to the repo's current working directory."""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        # Default to diffing against the first parent; note the diff is
        # generated from ctx2 to self (patch.diff(repo, node1, node2)).
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        ctx1.status(ctx2) returns the status of change from ctx1 to ctx2

        Returns a mercurial.scmutils.status object.

        Data can be accessed using either tuple notation:

          (modified, added, removed, deleted, unknown, ignored, clean)

        or direct attribute access:

          s.modified, s.added, ...
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                # Fold subrepo results into ours, prefixing paths with the
                # subrepo path.
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

    def mergestate(self, clean=False):
        """Get a mergestate object for this context."""
        raise NotImplementedError(
            '%s does not implement mergestate()' % self.__class__
        )

    def isempty(self):
        # A changeset is "empty" when it is a non-merge that changes no
        # files, stays on its parent's branch, and does not close it.
        return not (
            len(self.parents()) > 1
            or self.branch() != self.p1().branch()
            or self.closesbranch()
            or self.files()
        )
496 499
497 500
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        # Hash by revision; fall back to identity before _rev is set.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # The null revision is the only falsy changectx.
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # Parsed changelog entry; go through the unfiltered changelog only
        # when the revision is known not to be filtered.
        if self._maybe_filtered:
            repo = self._repo
        else:
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # Manifest delta against the parent; cheaper than a full read when
        # only a few entries are needed (see basectx._fileinfo).
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        if self._maybe_filtered:
            cl = repo.changelog
        else:
            cl = repo.unfiltered().changelog

        # Parents of a non-filtered revision are never filtered themselves,
        # hence maybe_filtered=False below.
        p1, p2 = cl.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
        return [
            changectx(repo, p1, cl.node(p1), maybe_filtered=False),
            changectx(repo, p2, cl.node(p2), maybe_filtered=False),
        ]

    def changeset(self):
        # Raw changeset data as a 6-tuple.
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        # Touched files that are neither added nor removed.
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        """Return the list of files added by this changeset.

        Honors the copies source configuration: changelog/sidedata data is
        used when available; in 'compatibility' mode missing data is
        recomputed from the filelogs; in pure filelog mode changelog
        content is ignored entirely."""
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = metadata.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded

    def filesremoved(self):
        """Return the list of files removed by this changeset.

        Mirror of filesadded(); see that method for the config handling."""
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = metadata.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatiblity mode and there is not data in the
            # changeset), we get the copy metadata from the filelogs.
            #
            # otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        # True when the revision is filtered out of the 'visible' view.
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        # Lazily yield ancestor contexts (does not include self).
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = self._repo.nodeconstants.nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
788 791
789 792
790 793 class basefilectx(object):
791 794 """A filecontext object represents the common logic for its children:
792 795 filectx: read-only access to a filerevision that is already present
793 796 in the repo,
794 797 workingfilectx: a filecontext that represents files from the working
795 798 directory,
796 799 memfilectx: a filecontext that represents files in-memory,
797 800 """
798 801
    @propertycache
    def _filelog(self):
        # Revlog holding all revisions of this file.
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Changelog revision this file context is attached to.  Resolution
        # order: explicit changectx, then a known descendant (with lazy
        # linkrev correction), then the raw filelog linkrev.
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        # Node id of this file revision, from an explicit file id when one
        # was given, otherwise looked up through the owning changeset.
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # Filelog-local revision number of this file revision.
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        # Path of the file relative to the repository root.
        return self._path
828 831
    def __nonzero__(self):
        # A file context is truthy when its file revision can be resolved.
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        # b"path@changeset", or b"path@???" when the changeset lookup fails.
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # Hash on (path, filenode), matching __eq__ below; fall back to
        # identity when those attributes cannot be resolved.
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # Equal when same concrete type, same path and same file node.
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
868 871
    def filerev(self):
        return self._filerev

    def filenode(self):
        return self._filenode

    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)

    def flags(self):
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        return self._changeid

    def linkrev(self):
        # Raw linkrev as recorded in the filelog.  This may not be an
        # ancestor of the context this filectx was reached from; see
        # _adjustlinkrev for the corrected value.
        return self._filelog.linkrev(self._filerev)

    # The following accessors delegate to the owning changeset context.

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        return self._copied

    def copysource(self):
        # Path this file was copied from, or a falsy value when not copied.
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        # Size in bytes of the file content (reads the data).
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False

    def isexec(self):
        return b'x' in self.flags()

    def islink(self):
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
966 969
    # Subclasses with a custom comparison set this to True so cmp() defers
    # to their implementation.
    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        # fctx without a filenode is a working-directory file: sizes are not
        # directly comparable, so fall back to content comparison in the
        # ambiguous cases below.
        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size() or self.flags() == b'l':
                # size() matches: need to compare content
                # issue6456: Always compare symlinks because size can represent
                # encrypted string for EXT-4 encryption(fscrypt).
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
999 1002
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        # Use the unfiltered changelog: linkrevs may point at filtered
        # revisions, and ancestry is revision-number based anyway.
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
1052 1055
1053 1056 def isintroducedafter(self, changelogrev):
1054 1057 """True if a filectx has been introduced after a given floor revision"""
1055 1058 if self.linkrev() >= changelogrev:
1056 1059 return True
1057 1060 introrev = self._introrev(stoprev=changelogrev)
1058 1061 if introrev is None:
1059 1062 return False
1060 1063 return introrev >= changelogrev
1061 1064
1062 1065 def introrev(self):
1063 1066 """return the rev of the changeset which introduced this file revision
1064 1067
1065 1068 This method is different from linkrev because it take into account the
1066 1069 changeset the filectx was created from. It ensures the returned
1067 1070 revision is one of its ancestors. This prevents bugs from
1068 1071 'linkrev-shadowing' when a file revision is used by multiple
1069 1072 changesets.
1070 1073 """
1071 1074 return self._introrev()
1072 1075
1073 1076 def _introrev(self, stoprev=None):
1074 1077 """
1075 1078 Same as `introrev` but, with an extra argument to limit changelog
1076 1079 iteration range in some internal usecase.
1077 1080
1078 1081 If `stoprev` is set, the `introrev` will not be searched past that
1079 1082 `stoprev` revision and "None" might be returned. This is useful to
1080 1083 limit the iteration range.
1081 1084 """
1082 1085 toprev = None
1083 1086 attrs = vars(self)
1084 1087 if '_changeid' in attrs:
1085 1088 # We have a cached value already
1086 1089 toprev = self._changeid
1087 1090 elif '_changectx' in attrs:
1088 1091 # We know which changelog entry we are coming from
1089 1092 toprev = self._changectx.rev()
1090 1093
1091 1094 if toprev is not None:
1092 1095 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1093 1096 elif '_descendantrev' in attrs:
1094 1097 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1095 1098 # be nice and cache the result of the computation
1096 1099 if introrev is not None:
1097 1100 self._changeid = introrev
1098 1101 return introrev
1099 1102 else:
1100 1103 return self.linkrev()
1101 1104
1102 1105 def introfilectx(self):
1103 1106 """Return filectx having identical contents, but pointing to the
1104 1107 changeset revision where this filectx was introduced"""
1105 1108 introrev = self.introrev()
1106 1109 if self.rev() == introrev:
1107 1110 return self
1108 1111 return self.filectx(self.filenode(), changeid=introrev)
1109 1112
1110 1113 def _parentfilectx(self, path, fileid, filelog):
1111 1114 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1112 1115 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1113 1116 if '_changeid' in vars(self) or '_changectx' in vars(self):
1114 1117 # If self is associated with a changeset (probably explicitly
1115 1118 # fed), ensure the created filectx is associated with a
1116 1119 # changeset that is an ancestor of self.changectx.
1117 1120 # This lets us later use _adjustlinkrev to get a correct link.
1118 1121 fctx._descendantrev = self.rev()
1119 1122 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1120 1123 elif '_descendantrev' in vars(self):
1121 1124 # Otherwise propagate _descendantrev if we have one associated.
1122 1125 fctx._descendantrev = self._descendantrev
1123 1126 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1124 1127 return fctx
1125 1128
    def parents(self):
        """Return the parent filectxs of this file revision.

        Filelog parents equal to nullid are dropped.  When the filelog entry
        records a rename, the rename source is inserted as the first parent.
        """
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [
            (_path, node, fl)
            for node in parents
            if node != self._repo.nodeconstants.nullid
        ]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1149 1152
1150 1153 def p1(self):
1151 1154 return self.parents()[0]
1152 1155
1153 1156 def p2(self):
1154 1157 p = self.parents()
1155 1158 if len(p) == 2:
1156 1159 return p[1]
1157 1160 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1158 1161
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        # the line-by-line attribution itself is delegated to dagop
        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )
1208 1211
1209 1212 def ancestors(self, followfirst=False):
1210 1213 visit = {}
1211 1214 c = self
1212 1215 if followfirst:
1213 1216 cut = 1
1214 1217 else:
1215 1218 cut = None
1216 1219
1217 1220 while True:
1218 1221 for parent in c.parents()[:cut]:
1219 1222 visit[(parent.linkrev(), parent.filenode())] = parent
1220 1223 if not visit:
1221 1224 break
1222 1225 c = visit.pop(max(visit))
1223 1226 yield c
1224 1227
1225 1228 def decodeddata(self):
1226 1229 """Returns `data()` after running repository decoding filters.
1227 1230
1228 1231 This is often equivalent to how the data would be expressed on disk.
1229 1232 """
1230 1233 return self._repo.wwritedata(self.path(), self.data())
1231 1234
1232 1235
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one of changeid, fileid or changectx must pin the revision
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        if filelog is not None:
            self._filelog = filelog

        # anything not assigned here is computed lazily via propertycaches
        # (here or on basefilectx)
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # changectx for self._changeid, tolerating filtered repositories
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        # revision data straight from the filelog, without read() processing
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content.

        Censored nodes raise Abort, unless censor.policy is b"ignore", in
        which case an empty string is returned instead.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        # size as recorded by the filelog for this file revision
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # the same file revision already exists in a parent:
                # do not report a copy for this changeset
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1357 1360
1358 1361
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        # a committable context has no revision/node yet
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            # no branch given anywhere: fall back to the default branch
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    def hex(self):
        # BUG FIX: the 'return' was missing, so this method silently
        # returned None instead of the working-directory pseudo-node hex
        # (compare workingctx.hex(), which does return the value).
        return self._repo.nodeconstants.wdirhex

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # default to the repository status when no explicit 'changes' given
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # devel.default-date overrides the current time when configured
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        # the union of modified, added and removed files, sorted
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # aliases used by the commit machinery
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # a committable context carries the bookmarks of all its parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # never commit in a lower phase than any parent
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1526 1529
1527 1530
1528 1531 class workingctx(committablectx):
1529 1532 """A workingctx object makes access to data related to
1530 1533 the current working directory convenient.
1531 1534 date - any valid date string or (unixtime, offset), or None.
1532 1535 user - username string, or None.
1533 1536 extra - a dictionary of extra values, or None.
1534 1537 changes - a list of file lists as returned by localrepo.status()
1535 1538 or None to use the repository status.
1536 1539 """
1537 1540
    def __init__(
        self, repo, text=b"", user=None, date=None, extra=None, changes=None
    ):
        # Resolve the branch from the dirstate unless the caller already
        # provided one via 'extra'; a non-UTF-8 dirstate branch aborts.
        branch = None
        if not extra or b'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_(b'branch name not in UTF-8!'))
        super(workingctx, self).__init__(
            repo, text, user, date, extra, changes, branch=branch
        )
1550 1553
1551 1554 def __iter__(self):
1552 1555 d = self._repo.dirstate
1553 1556 for f in d:
1554 1557 if d.get_entry(f).tracked:
1555 1558 yield f
1556 1559
1557 1560 def __contains__(self, key):
1558 1561 return self._repo.dirstate.get_entry(key).tracked
1559 1562
1560 1563 def hex(self):
1561 1564 return self._repo.nodeconstants.wdirhex
1562 1565
    @propertycache
    def _parents(self):
        # dirstate parents; a null second parent is dropped
        p = self._repo.dirstate.parents()
        if p[1] == self._repo.nodeconstants.nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [
            changectx(
                self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
            )
            for n in p
        ]
1576 1579
    def setparents(self, p1node, p2node=None):
        """Set the working directory parents, fixing up copy records.

        ``p2node`` defaults to the null node.  Copy records returned by
        ``dirstate.setparents`` are kept only for entries added relative to
        the first parent; with no second parent, copy records whose source
        and destination are both absent from the first parent are cleared.
        """
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == self._repo.nodeconstants.nullid:
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)
1595 1598
    def _fileinfo(self, path):
        """Ensure the manifest is materialized before delegating lookup."""
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1600 1603
    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                # copied files inherit the flags of their copy source
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    # only p2 changed the flag: take its side
                    return fl2
                if fl2 == fla:
                    # only p1 changed the flag: take its side
                    return fl1
                return b''  # punt for conflicts

        return func
1634 1637
    @propertycache
    def _flagfunc(self):
        # dirstate flag function; _buildflagfunc is passed as the fallback
        # used when the filesystem cannot report flags itself
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1638 1641
1639 1642 def flags(self, path):
1640 1643 try:
1641 1644 return self._flagfunc(path)
1642 1645 except OSError:
1643 1646 return b''
1644 1647
1645 1648 def filectx(self, path, filelog=None):
1646 1649 """get a file context from the working directory"""
1647 1650 return workingfilectx(
1648 1651 self._repo, path, workingctx=self, filelog=filelog
1649 1652 )
1650 1653
    def dirty(self, missing=False, merge=True, branch=True):
        """check whether a working directory is modified

        ``merge``: treat an in-progress merge (a second parent) as dirty.
        ``branch``: treat a branch differing from p1's branch as dirty.
        ``missing``: treat locally deleted ("missing") files as dirty.
        """
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return (
            (merge and self.p2())
            or (branch and self.branch() != self.p1().branch())
            or self.modified()
            or self.added()
            or self.removed()
            or (missing and self.deleted())
        )
1666 1669
    def add(self, list, prefix=b""):
        """Start tracking the given files; return the rejected ones.

        Missing files and non-regular files (not a plain file or symlink)
        are warned about and returned; already-tracked files only produce a
        warning.  Very large files trigger a memory-usage warning but are
        still added.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension.  That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif not ds.set_tracked(f):
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
            return rejected
1707 1710
1708 1711 def forget(self, files, prefix=b""):
1709 1712 with self._repo.wlock():
1710 1713 ds = self._repo.dirstate
1711 1714 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1712 1715 rejected = []
1713 1716 for f in files:
1714 1717 if not ds.set_untracked(f):
1715 1718 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1716 1719 rejected.append(f)
1717 1720 return rejected
1718 1721
    def copy(self, source, dest):
        """Record in the dirstate that ``dest`` was copied from ``source``.

        ``dest`` must exist in the working directory and be a regular file
        or a symlink; otherwise a warning is emitted and nothing is
        recorded.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # only a missing destination is tolerated (with a warning)
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                ds.set_tracked(dest)
                ds.copy(source, dest)
1739 1742
    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """Build a matcher for this working context.

        ``cwd`` defaults to the repository's current working directory.
        """
        r = self._repo
        if not cwd:
            cwd = r.getcwd()

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )
1770 1773
    def _filtersuspectsymlink(self, files):
        """Drop symlink placeholders whose content cannot be a symlink.

        Only relevant when the filesystem cannot represent symlinks
        (dirstate._checklink is False); otherwise ``files`` is returned
        unchanged.
        """
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == b'l':
                d = self[f].data()
                if (
                    d == b''
                    or len(d) >= 1024
                    or b'\n' in d
                    or stringutil.binary(d)
                ):
                    self._repo.ui.debug(
                        b'ignoring suspect symlink placeholder "%s"\n' % f
                    )
                    continue
            sane.append(f)
        return sane
1795 1798
    def _checklookup(self, files):
        """Recheck files whose dirstate entries were ambiguous ("lookup").

        Returns a ``(modified, deleted, fixup)`` triple, where ``fixup``
        lists ``(filename, (mode, size, mtime))`` pairs for files that
        turned out to be clean, so callers can update the dirstate without
        another stat.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    # XXX note that we have a race window here since we gather
                    # the stats after we compared, so the file might have
                    # changed.
                    #
                    # However, this has always been the case and the
                    # refactoring moving the code here is improving the
                    # situation by narrowing the race and moving the two steps
                    # (comparison + stat) in the same location.
                    #
                    # Making this code "correct" is now possible.
                    s = self[f].lstat()
                    mode = s.st_mode
                    size = s.st_size
                    mtime = timestamp.mtime_of(s)
                    fixup.append((f, (mode, size, mtime)))
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1828 1845
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        ``fixup`` is a list of ``(filename, parentfiledata)`` pairs as
        produced by _checklookup.  The dirstate is only written if its
        identity is unchanged since status started (issue5584).
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus or self._repo.dirstate._dirty:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    dirstate = self._repo.dirstate
                    if dirstate.identity() == oldid:
                        if fixup:
                            if dirstate.pendingparentchange():
                                # mid parent-change: record tracked state only;
                                # the cached stat data (pfd) is ignored here
                                normal = lambda f, pfd: dirstate.update_file(
                                    f, p1_tracked=True, wc_tracked=True
                                )
                            else:
                                normal = dirstate.set_clean
                            for f, pdf in fixup:
                                normal(f, pdf)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1875 1892
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # cmp holds the "lookup" files that need a content comparison
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            # fixup entries are (filename, parentfiledata) pairs
            s.clean.extend((f for f, _ in fixup))

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1908 1925
    @propertycache
    def _copies(self):
        """Compute the ``(p1copies, p2copies)`` dicts for this context.

        Only added/modified files inside the narrow-match are considered;
        each copy source is attributed to whichever parent manifest
        actually contains it.
        """
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        changedset = set(self.added()) | set(self.modified())
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if dst not in changedset or not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies
1926 1943
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        # delegate to the shared builder so subclasses can override _status
        return self._buildstatusmanifest(self._status)
1937 1954
1938 1955 def _buildstatusmanifest(self, status):
1939 1956 """Builds a manifest that includes the given status results."""
1940 1957 parents = self.parents()
1941 1958
1942 1959 man = parents[0].manifest().copy()
1943 1960
1944 1961 ff = self._flagfunc
1945 1962 for i, l in (
1946 1963 (self._repo.nodeconstants.addednodeid, status.added),
1947 1964 (self._repo.nodeconstants.modifiednodeid, status.modified),
1948 1965 ):
1949 1966 for f in l:
1950 1967 man[f] = i
1951 1968 try:
1952 1969 man.setflag(f, ff(f))
1953 1970 except OSError:
1954 1971 pass
1955 1972
1956 1973 for f in status.deleted + status.removed:
1957 1974 if f in man:
1958 1975 del man[f]
1959 1976
1960 1977 return man
1961 1978
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # slow path: fall back to a manifest-based comparison
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
1982 1999
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            # NOTE: mutates the caller-supplied match object in place
            match.bad = bad
        return match
2005 2022
2006 2023 def walk(self, match):
2007 2024 '''Generates matching file names.'''
2008 2025 return sorted(
2009 2026 self._repo.dirstate.walk(
2010 2027 self._repo.narrowmatch(match),
2011 2028 subrepos=sorted(self.substate),
2012 2029 unknown=True,
2013 2030 ignored=False,
2014 2031 )
2015 2032 )
2016 2033
2017 2034 def matches(self, match):
2018 2035 match = self._repo.narrowmatch(match)
2019 2036 ds = self._repo.dirstate
2020 2037 return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)
2021 2038
    def markcommitted(self, node):
        """Update the dirstate to reflect that ``node`` was committed.

        Modified/added files become tracked in both p1 and the working
        copy; removed files are dropped; then the dirstate parent is set
        to the new node.
        """
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.update_file(
                    f, p1_tracked=True, wc_tracked=True
                )
            for f in self.removed():
                self._repo.dirstate.update_file(
                    f, p1_tracked=False, wc_tracked=False
                )
            self._repo.dirstate.setparents(node)
        # drop cached changeid lookups that may now be stale
        self._repo._quick_access_changeid_invalidate()

        sparse.aftercommit(self._repo, node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
2041 2058
2042 2059 def mergestate(self, clean=False):
2043 2060 if clean:
2044 2061 return mergestatemod.mergestate.clean(self._repo)
2045 2062 return mergestatemod.mergestate.read(self._repo)
2046 2063
2047 2064
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        """Initialize for ``path`` in ``repo``.

        ``filelog`` and ``ctx`` are optional precomputed values; when not
        given the corresponding cached properties compute them lazily.
        """
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return ``(source path, filenode in first parent)`` if this file
        has a copy source, else None."""
        path = self.copysource()
        if not path:
            return None
        return (
            path,
            self._changectx._parents[0]._manifest.get(
                path, self._repo.nodeconstants.nullid
            ),
        )

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            # filenode of ``path`` in ``ctx``, or nullid when absent
            return ctx._manifest.get(path, self._repo.nodeconstants.nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # (path, filenode, filelog=None) -- filelog resolved lazily
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents in which the file does not exist (nullid)
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != self._repo.nodeconstants.nullid
        ]

    def children(self):
        """An uncommitted file has no children."""
        return []
2110 2127
2111 2128
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # the change context is the whole working directory
        return workingctx(self._repo)

    def data(self):
        """Return the file's current content from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the dirstate-recorded copy source, or falsy if none."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return (mtime, tz); falls back to the changectx date when the
        file is missing on disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # set link/exec flags on the on-disk file
        self._repo.wvfs.setflags(self._path, l, x)
2204 2221
2205 2222
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # initializes self._cache and self._mergestate
        self.clean()

    def setbase(self, wrappedctx):
        """Set the context this overlay reads through to."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=None):
        """Set parents; p1 must remain the wrapped context's node."""
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        """Return file content, preferring the in-memory cache."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # parent manifest plus sentinel nodeids for cached changes
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = self._repo.nodeconstants.addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = self._repo.nodeconstants.modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # flags come straight from the cache; only valid for dirty paths
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        """Sorted list of all files touched relative to the base."""
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # cached as existing AND present in the wrapped context
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # cached as existing but absent from the wrapped context
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # cached as deleted but present in the wrapped context
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        """Return the cached date for a dirty path, else the parent's."""
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record that ``path`` was copied from ``origin``."""
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        """Return the cached copy source for ``path``, or None."""
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        """Return the flags string, preferring the in-memory cache."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        """Record new content for ``path`` in the cache (no disk write)."""
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        """Record new link/exec flags for ``path`` in the cache."""
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        """Build a memctx amending ``precursor`` (same metadata, new files)."""
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        """True if ``path`` has a cached (uncommitted) change."""
        return path in self._cache

    def clean(self):
        """Discard all cached changes and the in-memory merge state."""
        self._mergestate = None
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [
                (
                    self.p1().rev(),
                    scmutil.matchfiles(self.repo(), self._cache.keys()),
                )
            ],
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }
        util.clearcachedproperty(self, b'_manifest')

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )

    def mergestate(self, clean=False):
        """Return (lazily creating) the in-memory merge state."""
        if clean or self._mergestate is None:
            self._mergestate = mergestatemod.memmergestate(self._repo)
        return self._mergestate
2623 2640
2624 2641
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        # parent is the owning overlayworkingctx; all operations delegate
        # to its per-path cache
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # content-only comparison (flags are compared elsewhere)
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # no on-disk path to audit for in-memory files
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # nothing on disk can conflict with an in-memory write
        pass
2679 2696
2680 2697
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # everything in the manifest that is not part of this commit
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2723 2740
2724 2741
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        try:
            return memo[path]
        except KeyError:
            result = memo[path] = func(repo, memctx, path)
            return result

    return getfilectx
2740 2757
2741 2758
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        # pull data, flags and copy source from the donor context
        fctx = ctx[path]
        copysource = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=copysource,
        )

    return getfilectx
2763 2780
2764 2781
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # file deleted by the patch; memctx records this as a removal
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2787 2804
2788 2805
2789 2806 class memctx(committablectx):
2790 2807 """Use memctx to perform in-memory commits via localrepo.commitctx().
2791 2808
2792 2809 Revision information is supplied at initialization time while
2793 2810 related files data and is made available through a callback
2794 2811 mechanism. 'repo' is the current localrepo, 'parents' is a
2795 2812 sequence of two parent revisions identifiers (pass None for every
2796 2813 missing parent), 'text' is the commit message and 'files' lists
2797 2814 names of files touched by the revision (normalized and relative to
2798 2815 repository root).
2799 2816
2800 2817 filectxfn(repo, memctx, path) is a callable receiving the
2801 2818 repository, the current memctx object and the normalized path of
2802 2819 requested file, relative to repository root. It is fired by the
2803 2820 commit function for every file in 'files', but calls order is
2804 2821 undefined. If the file is available in the revision being
2805 2822 committed (updated or added), filectxfn returns a memfilectx
2806 2823 object. If the file was removed, filectxfn return None for recent
2807 2824 Mercurial. Moved files are represented by marking the source file
2808 2825 removed and the new file added with copy information (see
2809 2826 memfilectx).
2810 2827
2811 2828 user receives the committer name and defaults to current
2812 2829 repository username, date is the commit date in any format
2813 2830 supported by dateutil.parsedate() and defaults to current date, extra
2814 2831 is a dictionary of metadata or is left empty.
2815 2832 """
2816 2833
2817 2834 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2818 2835 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2819 2836 # this field to determine what to do in filectxfn.
2820 2837 _returnnoneformissingfiles = True
2821 2838
    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        """Set up an in-memory commit; see the class docstring for the
        meaning of each argument."""
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        self._rev = None
        self._node = None
        # missing parents (None) default to the null revision
        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)
2859 2876
2860 2877 def filectx(self, path, filelog=None):
2861 2878 """get a file context from the working directory
2862 2879
2863 2880 Returns None if file doesn't exist and should be removed."""
2864 2881 return self._filectxfn(self._repo, self, path)
2865 2882
    def commit(self):
        """Commit this in-memory context to the repo via repo.commitctx()."""
        return self._repo.commitctx(self)
2869 2886
2870 2887 @propertycache
2871 2888 def _manifest(self):
2872 2889 """generate a manifest based on the return values of filectxfn"""
2873 2890
2874 2891 # keep this simple for now; just worry about p1
2875 2892 pctx = self._parents[0]
2876 2893 man = pctx.manifest().copy()
2877 2894
2878 2895 for f in self._status.modified:
2879 2896 man[f] = self._repo.nodeconstants.modifiednodeid
2880 2897
2881 2898 for f in self._status.added:
2882 2899 man[f] = self._repo.nodeconstants.addednodeid
2883 2900
2884 2901 for f in self._status.removed:
2885 2902 if f in man:
2886 2903 del man[f]
2887 2904
2888 2905 return man
2889 2906
2890 2907 @propertycache
2891 2908 def _status(self):
2892 2909 """Calculate exact status from ``files`` specified at construction"""
2893 2910 man1 = self.p1().manifest()
2894 2911 p2 = self._parents[1]
2895 2912 # "1 < len(self._parents)" can't be used for checking
2896 2913 # existence of the 2nd parent, because "memctx._parents" is
2897 2914 # explicitly initialized by the list, of which length is 2.
2898 2915 if p2.rev() != nullrev:
2899 2916 man2 = p2.manifest()
2900 2917 managing = lambda f: f in man1 or f in man2
2901 2918 else:
2902 2919 managing = lambda f: f in man1
2903 2920
2904 2921 modified, added, removed = [], [], []
2905 2922 for f in self._files:
2906 2923 if not managing(f):
2907 2924 added.append(f)
2908 2925 elif self[f]:
2909 2926 modified.append(f)
2910 2927 else:
2911 2928 removed.append(f)
2912 2929
2913 2930 return scmutil.status(modified, added, removed, [], [], [], [])
2914 2931
2915 2932 def parents(self):
2916 2933 if self._parents[1].rev() == nullrev:
2917 2934 return [self._parents[0]]
2918 2935 return self._parents
2919 2936
2920 2937
class memfilectx(committablefilectx):
    """An in-memory file to be committed as part of a memctx.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if current file was copied in
        the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Encode the file type as a manifest flag: 'l' for symlink,
        # 'x' for executable, empty for a regular file.
        self._flags = b'l' if islink else b'x' if isexec else b''
        self._copysource = copysource

    def copysource(self):
        """Return the copy source path, or None when this is not a copy."""
        return self._copysource

    def cmp(self, fctx):
        """Return True when this file's content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2971 2988
2972 2989
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            # reuse the original commit message by default
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # an uncommitted context has neither a revision number nor a node
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # pad with null revisions so there are always exactly two parents
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # node of the reused manifest, captured from ``originalctx``
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # delegate file access to the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # unknown to both parents: newly added
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3081 3098
3082 3099
class arbitraryfilectx(object):
    """Provides filectx-like functions for a file at an arbitrary location
    on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # ``repo`` is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True when this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so the fast
        # path is only safe when neither side is a symlink.
        has_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        if not has_symlink and isinstance(fctx, workingfilectx) and self._repo:
            # Fast path for merges where both sides are disk-backed.
            # filecmp returns True when files are identical, the opposite
            # convention of our cmp functions (True means different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        """Return the filesystem path this context was created with."""
        return self._path

    def flags(self):
        # flags (symlink/exec) are never reported for arbitrary files
        return b''

    def data(self):
        """Return the raw file content."""
        return util.readfile(self._path)

    def decodeddata(self):
        """Return the file content read directly from disk."""
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        """Delete the file from disk."""
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        """Write ``data`` to the file; flags are not supported."""
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now