##// END OF EJS Templates
overlayworkingctx: remove unused `nofilechanges()` and `_compact()` methods...
Manuel Jacob -
r45651:6a5dcd75 default
parent child Browse files
Show More
@@ -1,3102 +1,3059 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from .pycompat import (
27 27 getattr,
28 28 open,
29 29 )
30 30 from . import (
31 31 dagop,
32 32 encoding,
33 33 error,
34 34 fileset,
35 35 match as matchmod,
36 36 mergestate as mergestatemod,
37 37 metadata,
38 38 obsolete as obsmod,
39 39 patch,
40 40 pathutil,
41 41 phases,
42 42 pycompat,
43 43 repoview,
44 44 scmutil,
45 45 sparse,
46 46 subrepo,
47 47 subrepoutil,
48 48 util,
49 49 )
50 50 from .utils import (
51 51 dateutil,
52 52 stringutil,
53 53 )
54 54
# Convenience alias: caches a method's result as an instance attribute on
# first access (see mercurial.util.propertycache).
propertycache = util.propertycache
56 56
57 57
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts are equal only when they are of the exact same type and
        # refer to the same revision; missing _rev means "not comparable".
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # Parsed .hgsub/.hgsubstate information for this context.
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        """return the revision recorded for subpath in .hgsubstate"""
        return self.substate[subpath][1]

    def rev(self):
        """return the revision number of this context"""
        return self._rev

    def node(self):
        """return the binary nodeid of this context"""
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        return phases.phasenames[self.phase()]

    def mutable(self):
        # Anything beyond the public phase is mutable.
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        """return the first parent context"""
        return self._parents[0]

    def p2(self):
        """return the second parent context, or the null context if there is
        only one parent"""
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        # Look up (filenode, flags) for path, preferring whatever data is
        # already cached on this object (full manifest, then manifest delta)
        # before reading a manifest from the store.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # File is not in this context; report empty flags.
            return b''

    @propertycache
    def _copies(self):
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """return a matcher for the given patterns, rooted at the repo root"""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

    def mergestate(self, clean=False):
        """Get a mergestate object for this context."""
        raise NotImplementedError(
            '%s does not implement mergestate()' % self.__class__
        )

    def isempty(self):
        """True if this changeset carries no changes of its own (no extra
        parent, no branch change, no branch close, no touched files)."""
        return not (
            len(self.parents()) > 1
            or self.branch() != self.p1().branch()
            or self.closesbranch()
            or self.files()
        )
491 491
492 492
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog
        # must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        if self._maybe_filtered:
            repo = self._repo
        else:
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        if self._maybe_filtered:
            cl = repo.changelog
        else:
            cl = repo.unfiltered().changelog

        p1, p2 = cl.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
        return [
            changectx(repo, p1, cl.node(p1), maybe_filtered=False),
            changectx(repo, p2, cl.node(p2), maybe_filtered=False),
        ]

    def changeset(self):
        """return the raw changeset tuple (manifest, user, date, files,
        description, extra)"""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        """return the sorted files touched by this changeset that were neither
        added nor removed by it"""
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        """return the files added by this changeset, honoring the configured
        copy-metadata source (changeset vs filelog)"""
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = metadata.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded

    def filesremoved(self):
        """return the files removed by this changeset, honoring the configured
        copy-metadata source (changeset vs filelog)"""
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = metadata.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
783 783
784 784
785 785 class basefilectx(object):
786 786 """A filecontext object represents the common logic for its children:
787 787 filectx: read-only access to a filerevision that is already present
788 788 in the repo,
789 789 workingfilectx: a filecontext that represents files from the working
790 790 directory,
791 791 memfilectx: a filecontext that represents files in-memory,
792 792 """
793 793
794 794 @propertycache
795 795 def _filelog(self):
796 796 return self._repo.file(self._path)
797 797
798 798 @propertycache
799 799 def _changeid(self):
800 800 if '_changectx' in self.__dict__:
801 801 return self._changectx.rev()
802 802 elif '_descendantrev' in self.__dict__:
803 803 # this file context was created from a revision with a known
804 804 # descendant, we can (lazily) correct for linkrev aliases
805 805 return self._adjustlinkrev(self._descendantrev)
806 806 else:
807 807 return self._filelog.linkrev(self._filerev)
808 808
809 809 @propertycache
810 810 def _filenode(self):
811 811 if '_fileid' in self.__dict__:
812 812 return self._filelog.lookup(self._fileid)
813 813 else:
814 814 return self._changectx.filenode(self._path)
815 815
816 816 @propertycache
817 817 def _filerev(self):
818 818 return self._filelog.rev(self._filenode)
819 819
820 820 @propertycache
821 821 def _repopath(self):
822 822 return self._path
823 823
824 824 def __nonzero__(self):
825 825 try:
826 826 self._filenode
827 827 return True
828 828 except error.LookupError:
829 829 # file is missing
830 830 return False
831 831
832 832 __bool__ = __nonzero__
833 833
834 834 def __bytes__(self):
835 835 try:
836 836 return b"%s@%s" % (self.path(), self._changectx)
837 837 except error.LookupError:
838 838 return b"%s@???" % self.path()
839 839
840 840 __str__ = encoding.strmethod(__bytes__)
841 841
842 842 def __repr__(self):
843 843 return "<%s %s>" % (type(self).__name__, str(self))
844 844
845 845 def __hash__(self):
846 846 try:
847 847 return hash((self._path, self._filenode))
848 848 except AttributeError:
849 849 return id(self)
850 850
851 851 def __eq__(self, other):
852 852 try:
853 853 return (
854 854 type(self) == type(other)
855 855 and self._path == other._path
856 856 and self._filenode == other._filenode
857 857 )
858 858 except AttributeError:
859 859 return False
860 860
861 861 def __ne__(self, other):
862 862 return not (self == other)
863 863
864 864 def filerev(self):
865 865 return self._filerev
866 866
867 867 def filenode(self):
868 868 return self._filenode
869 869
870 870 @propertycache
871 871 def _flags(self):
872 872 return self._changectx.flags(self._path)
873 873
874 874 def flags(self):
875 875 return self._flags
876 876
877 877 def filelog(self):
878 878 return self._filelog
879 879
880 880 def rev(self):
881 881 return self._changeid
882 882
883 883 def linkrev(self):
884 884 return self._filelog.linkrev(self._filerev)
885 885
886 886 def node(self):
887 887 return self._changectx.node()
888 888
889 889 def hex(self):
890 890 return self._changectx.hex()
891 891
892 892 def user(self):
893 893 return self._changectx.user()
894 894
895 895 def date(self):
896 896 return self._changectx.date()
897 897
898 898 def files(self):
899 899 return self._changectx.files()
900 900
901 901 def description(self):
902 902 return self._changectx.description()
903 903
904 904 def branch(self):
905 905 return self._changectx.branch()
906 906
907 907 def extra(self):
908 908 return self._changectx.extra()
909 909
910 910 def phase(self):
911 911 return self._changectx.phase()
912 912
913 913 def phasestr(self):
914 914 return self._changectx.phasestr()
915 915
916 916 def obsolete(self):
917 917 return self._changectx.obsolete()
918 918
919 919 def instabilities(self):
920 920 return self._changectx.instabilities()
921 921
922 922 def manifest(self):
923 923 return self._changectx.manifest()
924 924
925 925 def changectx(self):
926 926 return self._changectx
927 927
928 928 def renamed(self):
929 929 return self._copied
930 930
931 931 def copysource(self):
932 932 return self._copied and self._copied[0]
933 933
934 934 def repo(self):
935 935 return self._repo
936 936
937 937 def size(self):
938 938 return len(self.data())
939 939
940 940 def path(self):
941 941 return self._path
942 942
943 943 def isbinary(self):
944 944 try:
945 945 return stringutil.binary(self.data())
946 946 except IOError:
947 947 return False
948 948
949 949 def isexec(self):
950 950 return b'x' in self.flags()
951 951
952 952 def islink(self):
953 953 return b'l' in self.flags()
954 954
955 955 def isabsent(self):
956 956 """whether this filectx represents a file not in self._changectx
957 957
958 958 This is mainly for merge code to detect change/delete conflicts. This is
959 959 expected to be True for all subclasses of basectx."""
960 960 return False
961 961
962 962 _customcmp = False
963 963
964 964 def cmp(self, fctx):
965 965 """compare with other file context
966 966
967 967 returns True if different than fctx.
968 968 """
969 969 if fctx._customcmp:
970 970 return fctx.cmp(self)
971 971
972 972 if self._filenode is None:
973 973 raise error.ProgrammingError(
974 974 b'filectx.cmp() must be reimplemented if not backed by revlog'
975 975 )
976 976
977 977 if fctx._filenode is None:
978 978 if self._repo._encodefilterpats:
979 979 # can't rely on size() because wdir content may be decoded
980 980 return self._filelog.cmp(self._filenode, fctx.data())
981 981 if self.size() - 4 == fctx.size():
982 982 # size() can match:
983 983 # if file data starts with '\1\n', empty metadata block is
984 984 # prepended, which adds 4 bytes to filelog.size().
985 985 return self._filelog.cmp(self._filenode, fctx.data())
986 986 if self.size() == fctx.size():
987 987 # size() matches: need to compare content
988 988 return self._filelog.cmp(self._filenode, fctx.data())
989 989
990 990 # size() differs
991 991 return True
992 992
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the linkrev itself is the source revision
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # walked below the floor without finding the intro
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
1045 1045
1046 1046 def isintroducedafter(self, changelogrev):
1047 1047 """True if a filectx has been introduced after a given floor revision
1048 1048 """
1049 1049 if self.linkrev() >= changelogrev:
1050 1050 return True
1051 1051 introrev = self._introrev(stoprev=changelogrev)
1052 1052 if introrev is None:
1053 1053 return False
1054 1054 return introrev >= changelogrev
1055 1055
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.

        See ``_introrev`` for a variant bounded by a floor revision.
        """
        return self._introrev()
1066 1066
    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        # inspect which association caches are populated, without
        # triggering the propertycaches
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # no changeset association at all: trust the linkrev
            return self.linkrev()
1095 1095
1096 1096 def introfilectx(self):
1097 1097 """Return filectx having identical contents, but pointing to the
1098 1098 changeset revision where this filectx was introduced"""
1099 1099 introrev = self.introrev()
1100 1100 if self.rev() == introrev:
1101 1101 return self
1102 1102 return self.filectx(self.filenode(), changeid=introrev)
1103 1103
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()

        ``path``/``fileid``/``filelog`` identify the parent file revision.
        """
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
1119 1119
    def parents(self):
        """Return the parent filectxs, substituting rename information for
        the first null parent when this file revision was copied/renamed."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1139 1139
1140 1140 def p1(self):
1141 1141 return self.parents()[0]
1142 1142
1143 1143 def p2(self):
1144 1144 p = self.parents()
1145 1145 if len(p) == 2:
1146 1146 return p[1]
1147 1147 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1148 1148
1149 1149 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1150 1150 """Returns a list of annotateline objects for each line in the file
1151 1151
1152 1152 - line.fctx is the filectx of the node where that line was last changed
1153 1153 - line.lineno is the line number at the first appearance in the managed
1154 1154 file
1155 1155 - line.text is the data on that line (including newline character)
1156 1156 """
1157 1157 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1158 1158
1159 1159 def parents(f):
1160 1160 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1161 1161 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1162 1162 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1163 1163 # isn't an ancestor of the srcrev.
1164 1164 f._changeid
1165 1165 pl = f.parents()
1166 1166
1167 1167 # Don't return renamed parents if we aren't following.
1168 1168 if not follow:
1169 1169 pl = [p for p in pl if p.path() == f.path()]
1170 1170
1171 1171 # renamed filectx won't have a filelog yet, so set it
1172 1172 # from the cache to save time
1173 1173 for p in pl:
1174 1174 if not '_filelog' in p.__dict__:
1175 1175 p._filelog = getlog(p.path())
1176 1176
1177 1177 return pl
1178 1178
1179 1179 # use linkrev to find the first changeset where self appeared
1180 1180 base = self.introfilectx()
1181 1181 if getattr(base, '_ancestrycontext', None) is None:
1182 1182 # it is safe to use an unfiltered repository here because we are
1183 1183 # walking ancestors only.
1184 1184 cl = self._repo.unfiltered().changelog
1185 1185 if base.rev() is None:
1186 1186 # wctx is not inclusive, but works because _ancestrycontext
1187 1187 # is used to test filelog revisions
1188 1188 ac = cl.ancestors(
1189 1189 [p.rev() for p in base.parents()], inclusive=True
1190 1190 )
1191 1191 else:
1192 1192 ac = cl.ancestors([base.rev()], inclusive=True)
1193 1193 base._ancestrycontext = ac
1194 1194
1195 1195 return dagop.annotate(
1196 1196 base, parents, skiprevs=skiprevs, diffopts=diffopts
1197 1197 )
1198 1198
1199 1199 def ancestors(self, followfirst=False):
1200 1200 visit = {}
1201 1201 c = self
1202 1202 if followfirst:
1203 1203 cut = 1
1204 1204 else:
1205 1205 cut = None
1206 1206
1207 1207 while True:
1208 1208 for parent in c.parents()[:cut]:
1209 1209 visit[(parent.linkrev(), parent.filenode())] = parent
1210 1210 if not visit:
1211 1211 break
1212 1212 c = visit.pop(max(visit))
1213 1213 yield c
1214 1214
1215 1215 def decodeddata(self):
1216 1216 """Returns `data()` after running repository decoding filters.
1217 1217
1218 1218 This is often equivalent to how the data would be expressed on disk.
1219 1219 """
1220 1220 return self._repo.wwritedata(self.path(), self.data())
1221 1221
1222 1222
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of identifying the revision is required
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog

        # seed the caches that basefilectx's propertycaches would
        # otherwise compute lazily
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Return the raw (revlog-level, undecoded) data for this revision."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy for
        censored nodes."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                # censored content silently reads as empty
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """Return the size of this file revision as reported by the
        filelog."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            # this changeset introduced the file revision: report the rename
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # unchanged relative to a parent: not a copy here
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1346 1346
1347 1347
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        # not committed yet: there is no revision number or node id
        self._rev = None
        self._node = None
        self._text = text
        # only seed the caches when values were explicitly supplied; the
        # matching @propertycache below computes a default otherwise
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        """Default status: the full repository status."""
        return self._repo.status()

    @propertycache
    def _user(self):
        """Default user: the configured username."""
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        """Default date: the devel.default-date config, or the current
        time."""
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        # an uncommitted context has no subrepo revision yet
        return None

    def manifestnode(self):
        # no manifest has been written yet
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Return all files touched by this context, sorted."""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # aliases matching the file-change accessors used elsewhere
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        """Return the combined bookmarks of all parents."""
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        """Return the highest phase among the configured new-commit phase
        and the parents' phases."""
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return flags for ``path``, preferring a cached manifest over the
        flag function."""
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        """Yield the parents, then all their changelog ancestors."""
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        # a bare committable context is never considered dirty
        return False
1512 1512
1513 1513
1514 1514 class workingctx(committablectx):
1515 1515 """A workingctx object makes access to data related to
1516 1516 the current working directory convenient.
1517 1517 date - any valid date string or (unixtime, offset), or None.
1518 1518 user - username string, or None.
1519 1519 extra - a dictionary of extra values, or None.
1520 1520 changes - a list of file lists as returned by localrepo.status()
1521 1521 or None to use the repository status.
1522 1522 """
1523 1523
1524 1524 def __init__(
1525 1525 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1526 1526 ):
1527 1527 branch = None
1528 1528 if not extra or b'branch' not in extra:
1529 1529 try:
1530 1530 branch = repo.dirstate.branch()
1531 1531 except UnicodeDecodeError:
1532 1532 raise error.Abort(_(b'branch name not in UTF-8!'))
1533 1533 super(workingctx, self).__init__(
1534 1534 repo, text, user, date, extra, changes, branch=branch
1535 1535 )
1536 1536
1537 1537 def __iter__(self):
1538 1538 d = self._repo.dirstate
1539 1539 for f in d:
1540 1540 if d[f] != b'r':
1541 1541 yield f
1542 1542
1543 1543 def __contains__(self, key):
1544 1544 return self._repo.dirstate[key] not in b"?r"
1545 1545
1546 1546 def hex(self):
1547 1547 return wdirhex
1548 1548
    @propertycache
    def _parents(self):
        """The changectx parents of the working directory (one or two)."""
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            # no second parent: report only p1
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [
            changectx(
                self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
            )
            for n in p
        ]
1562 1562
    def setparents(self, p1node, p2node=nullid):
        """Set the working directory parents in the dirstate and fix up
        the recorded copies accordingly."""
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == nullid:
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        # neither source nor destination survives in the
                        # remaining parent: drop the copy record
                        dirstate.copy(None, f)
1579 1579
    def _fileinfo(self, path):
        """Look up ``path``, forcing the manifest to be computed first."""
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1584 1584
    def _buildflagfunc(self):
        """Return a flags(path) fallback function built from the parent
        manifest(s), for filesystems that cannot report flags."""
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                # map back through any recorded copy before looking up flags
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    # both sides agree
                    return fl1
                if fl1 == fla:
                    # only p2 changed the flags: take its value
                    return fl2
                if fl2 == fla:
                    # only p1 changed the flags: take its value
                    return fl1
                return b''  # punt for conflicts

        return func
1618 1618
    @propertycache
    def _flagfunc(self):
        """Flag lookup backed by the dirstate, with ``_buildflagfunc`` as
        the fallback when the filesystem lacks flag support."""
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1622 1622
1623 1623 def flags(self, path):
1624 1624 try:
1625 1625 return self._flagfunc(path)
1626 1626 except OSError:
1627 1627 return b''
1628 1628
    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns a workingfilectx bound to this workingctx.
        """
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
1634 1634
    def dirty(self, missing=False, merge=True, branch=True):
        """check whether a working directory is modified

        NOTE(review): the return value may be any truthy object (a context,
        a list of files, ...), not strictly a bool; callers should only
        rely on truthiness.
        """
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return (
            (merge and self.p2())
            or (branch and self.branch() != self.p1().branch())
            or self.modified()
            or self.added()
            or self.removed()
            or (missing and self.deleted())
        )
1650 1650
1651 1651 def add(self, list, prefix=b""):
1652 1652 with self._repo.wlock():
1653 1653 ui, ds = self._repo.ui, self._repo.dirstate
1654 1654 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1655 1655 rejected = []
1656 1656 lstat = self._repo.wvfs.lstat
1657 1657 for f in list:
1658 1658 # ds.pathto() returns an absolute file when this is invoked from
1659 1659 # the keyword extension. That gets flagged as non-portable on
1660 1660 # Windows, since it contains the drive letter and colon.
1661 1661 scmutil.checkportable(ui, os.path.join(prefix, f))
1662 1662 try:
1663 1663 st = lstat(f)
1664 1664 except OSError:
1665 1665 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1666 1666 rejected.append(f)
1667 1667 continue
1668 1668 limit = ui.configbytes(b'ui', b'large-file-limit')
1669 1669 if limit != 0 and st.st_size > limit:
1670 1670 ui.warn(
1671 1671 _(
1672 1672 b"%s: up to %d MB of RAM may be required "
1673 1673 b"to manage this file\n"
1674 1674 b"(use 'hg revert %s' to cancel the "
1675 1675 b"pending addition)\n"
1676 1676 )
1677 1677 % (f, 3 * st.st_size // 1000000, uipath(f))
1678 1678 )
1679 1679 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1680 1680 ui.warn(
1681 1681 _(
1682 1682 b"%s not added: only files and symlinks "
1683 1683 b"supported currently\n"
1684 1684 )
1685 1685 % uipath(f)
1686 1686 )
1687 1687 rejected.append(f)
1688 1688 elif ds[f] in b'amn':
1689 1689 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1690 1690 elif ds[f] == b'r':
1691 1691 ds.normallookup(f)
1692 1692 else:
1693 1693 ds.add(f)
1694 1694 return rejected
1695 1695
1696 1696 def forget(self, files, prefix=b""):
1697 1697 with self._repo.wlock():
1698 1698 ds = self._repo.dirstate
1699 1699 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1700 1700 rejected = []
1701 1701 for f in files:
1702 1702 if f not in ds:
1703 1703 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1704 1704 rejected.append(f)
1705 1705 elif ds[f] != b'a':
1706 1706 ds.remove(f)
1707 1707 else:
1708 1708 ds.drop(f)
1709 1709 return rejected
1710 1710
    def copy(self, source, dest):
        """Record in the dirstate that ``dest`` was copied from ``source``."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # missing destination: warn and record nothing
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in b'?':
                    # destination not yet tracked: add it
                    ds.add(dest)
                elif ds[dest] in b'r':
                    # destination was marked removed: resurrect it
                    ds.normallookup(dest)
                ds.copy(source, dest)
1734 1734
    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """Build a matcher for the given patterns relative to ``cwd``
        (defaulting to the repo's current working directory)."""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )
1765 1765
1766 1766 def _filtersuspectsymlink(self, files):
1767 1767 if not files or self._repo.dirstate._checklink:
1768 1768 return files
1769 1769
1770 1770 # Symlink placeholders may get non-symlink-like contents
1771 1771 # via user error or dereferencing by NFS or Samba servers,
1772 1772 # so we filter out any placeholders that don't look like a
1773 1773 # symlink
1774 1774 sane = []
1775 1775 for f in files:
1776 1776 if self.flags(f) == b'l':
1777 1777 d = self[f].data()
1778 1778 if (
1779 1779 d == b''
1780 1780 or len(d) >= 1024
1781 1781 or b'\n' in d
1782 1782 or stringutil.binary(d)
1783 1783 ):
1784 1784 self._repo.ui.debug(
1785 1785 b'ignoring suspect symlink placeholder "%s"\n' % f
1786 1786 )
1787 1787 continue
1788 1788 sane.append(f)
1789 1789 return sane
1790 1790
    def _checklookup(self, files):
        """Re-examine files whose dirstate entry was ambiguous.

        Returns a ``(modified, deleted, fixup)`` triple, where ``fixup``
        lists files that turned out to be clean.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1823 1823
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        Also runs any registered post-dirstate-status hooks.  Writing the
        dirstate is best-effort: it is skipped when the wlock cannot be
        taken or when the dirstate changed under us.
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1864 1864
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # cmp: files whose state is ambiguous and needs content comparison
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1897 1897
1898 1898 @propertycache
1899 1899 def _copies(self):
1900 1900 p1copies = {}
1901 1901 p2copies = {}
1902 1902 parents = self._repo.dirstate.parents()
1903 1903 p1manifest = self._repo[parents[0]].manifest()
1904 1904 p2manifest = self._repo[parents[1]].manifest()
1905 1905 changedset = set(self.added()) | set(self.modified())
1906 1906 narrowmatch = self._repo.narrowmatch()
1907 1907 for dst, src in self._repo.dirstate.copies().items():
1908 1908 if dst not in changedset or not narrowmatch(dst):
1909 1909 continue
1910 1910 if src in p1manifest:
1911 1911 p1copies[dst] = src
1912 1912 elif src in p2manifest:
1913 1913 p2copies[dst] = src
1914 1914 return p1copies, p2copies
1915 1915
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1926 1926
1927 1927 def _buildstatusmanifest(self, status):
1928 1928 """Builds a manifest that includes the given status results."""
1929 1929 parents = self.parents()
1930 1930
1931 1931 man = parents[0].manifest().copy()
1932 1932
1933 1933 ff = self._flagfunc
1934 1934 for i, l in (
1935 1935 (addednodeid, status.added),
1936 1936 (modifiednodeid, status.modified),
1937 1937 ):
1938 1938 for f in l:
1939 1939 man[f] = i
1940 1940 try:
1941 1941 man.setflag(f, ff(f))
1942 1942 except OSError:
1943 1943 pass
1944 1944
1945 1945 for f in status.deleted + status.removed:
1946 1946 if f in man:
1947 1947 del man[f]
1948 1948
1949 1949 return man
1950 1950
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # note: the incoming ``s`` is recomputed from the dirstate here
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # slow path: full manifest comparison against an arbitrary rev
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            match.bad = bad
        return match
1994 1994
    def walk(self, match):
        '''Generates matching file names, sorted, via a dirstate walk.'''
        return sorted(
            self._repo.dirstate.walk(
                self._repo.narrowmatch(match),
                subrepos=sorted(self.substate),
                unknown=True,
                ignored=False,
            )
        )

    def matches(self, match):
        """Return sorted tracked files matching ``match``.

        Files in state 'r' (marked for removal) are excluded.
        """
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2010 2010
    def markcommitted(self, node):
        """Update the dirstate to reflect that ``node`` was committed.

        Modified/added files become 'normal', removed files are dropped,
        and the dirstate parent is set to the new node.
        """
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
            self._repo._quick_access_changeid_invalidate()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)

    def mergestate(self, clean=False):
        """Return the repo's merge state (a fresh, clean one if requested)."""
        if clean:
            return mergestatemod.mergestate.clean(self._repo)
        return mergestatemod.mergestate.read(self._repo)
2031 2031
2032 2032
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # filelog and ctx are optional; when absent, subclasses fill the
        # corresponding attributes lazily via propertycaches
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if this file was copied,
        else None."""
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        # first parent comes from the copy source if renamed, otherwise
        # from p1's manifest entry for this path
        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # nullid entries mean "no parent on that side" and are skipped
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != nullid
        ]

    def children(self):
        # uncommitted files have no children
        return []
2090 2090
2091 2091
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Read the file's content from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the dirstate-recorded copy source, or None."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return (mtime, tz); falls back to the changectx date if the
        file is missing on disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
2184 2184
2185 2185
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # start with an empty write-back cache
        self.clean()

    def setbase(self, wrappedctx):
        """Set the context whose content this overlay is stacked on."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=nullid):
        # p1 must stay the wrapped context's node; only p2 may vary
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2216 2216
    def data(self, path):
        """Return file content: from the cache if dirty, else from the
        wrapped context.

        Raises ProgrammingError if the cache records the file as deleted.
        """
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        """Manifest of p1 overlaid with cached adds/modifications/removals.

        Added and modified files are stamped with sentinel nodeids, like
        in ``workingctx._buildstatusmanifest``.
        """
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man
2247 2247
    @propertycache
    def _flagfunc(self):
        # flags are only meaningful for cached (dirty) paths
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        """All paths touched by this overlay, sorted."""
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # cached as existing AND present in the wrapped context
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # cached as existing but absent from the wrapped context
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # cached as deleted but present in the wrapped context
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]
2278 2278
2279 2279 def p1copies(self):
2280 2280 copies = {}
2281 2281 narrowmatch = self._repo.narrowmatch()
2282 2282 for f in self._cache.keys():
2283 2283 if not narrowmatch(f):
2284 2284 continue
2285 2285 copies.pop(f, None) # delete if it exists
2286 2286 source = self._cache[f][b'copied']
2287 2287 if source:
2288 2288 copies[f] = source
2289 2289 return copies
2290 2290
2291 2291 def p2copies(self):
2292 2292 copies = {}
2293 2293 narrowmatch = self._repo.narrowmatch()
2294 2294 for f in self._cache.keys():
2295 2295 if not narrowmatch(f):
2296 2296 continue
2297 2297 copies.pop(f, None) # delete if it exists
2298 2298 source = self._cache[f][b'copied']
2299 2299 if source:
2300 2300 copies[f] = source
2301 2301 return copies
2302 2302
    def isinmemory(self):
        # this context never touches the filesystem
        return True

    def filedate(self, path):
        """Return the cached date if dirty, else the wrapped file's date."""
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record that ``path`` is a copy of ``origin``.

        Re-marks the path dirty, preserving its current date and flags.
        """
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        """Return the recorded copy source for ``path``, or None."""
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        """Return flags from the cache if dirty, else from the wrapped ctx.

        Raises ProgrammingError if the cache records the file as deleted.
        """
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()
2337 2337
    def __contains__(self, key):
        # cache takes precedence: a cached deletion hides a file in p1
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        """Return True if ``path`` exists in the wrapped context."""
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False
2352 2352
    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.

        Raises Abort on any file/directory conflict with p1.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            # the path itself being the only match is not a conflict
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )
2400 2400
    def write(self, path, data, flags=b'', **kwargs):
        """Cache a write of ``data`` to ``path`` after auditing conflicts."""
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        """Cache a flag change ('l' symlink wins over 'x' executable)."""
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        """Cache a deletion of ``path``."""
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                # symlink target is stored as the file's data
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        """Return the file size from the cache if dirty, else the parent's.

        Raises ProgrammingError if the cache records the file as deleted.
        """
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()
2453 2453
    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        """Build a memctx that amends ``precursor``, reusing its metadata."""
        extra = precursor.extra().copy()
        # record what we amended so obsolescence markers can be created
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )
2526 2526
    def isdirty(self, path):
        """Return True if ``path`` has an entry in the write-back cache."""
        return path in self._cache

    def clean(self):
        """Discard all cached writes."""
        self._cache = {}
2538 2532
2539 def _compact(self):
2540 """Removes keys from the cache that are actually clean, by comparing
2541 them with the underlying context.
2542
2543 This can occur during the merge process, e.g. by passing --tool :local
2544 to resolve a conflict.
2545 """
2546 keys = []
2547 # This won't be perfect, but can help performance significantly when
2548 # using things like remotefilelog.
2549 scmutil.prefetchfiles(
2550 self.repo(),
2551 [
2552 (
2553 self.p1().rev(),
2554 scmutil.matchfiles(self.repo(), self._cache.keys()),
2555 )
2556 ],
2557 )
2558
2559 for path in self._cache.keys():
2560 cache = self._cache[path]
2561 try:
2562 underlying = self._wrappedctx[path]
2563 if (
2564 underlying.data() == cache[b'data']
2565 and underlying.flags() == cache[b'flags']
2566 ):
2567 keys.append(path)
2568 except error.ManifestLookupError:
2569 # Path not in the underlying manifest (created).
2570 continue
2571
2572 for path in keys:
2573 del self._cache[path]
2574 return keys
2575
    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        """Record ``path``'s state in the write-back cache.

        Invariant: an existing file always has its data cached.
        """
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }

    def filectx(self, path, filelog=None):
        """Return a file context that reads/writes through this overlay."""
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )
2600 2557
2601 2558
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``.

    Every operation delegates to the parent ``overlayworkingctx``.
    """

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # content comparison only; flags are not considered here
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # nothing to audit: no filesystem is involved
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no on-disk conflicts can exist for an in-memory file
        pass
2656 2613
2657 2614
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        # ignored/unknown are always reported empty for a commit context
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2701 2658
2702 2659
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        try:
            return memo[path]
        except KeyError:
            result = memo[path] = func(repo, memctx, path)
            return result

    return getfilectx
2718 2675
2719 2676
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        srcpath = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=srcpath,
        )

    return getfilectx
2741 2698
2742 2699
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        filedata, mode, renamesource = patchstore.getfile(path)
        if filedata is None:
            # None signals a deletion to memctx
            return None
        linkflag, execflag = mode
        return memfilectx(
            repo,
            memctx,
            path,
            filedata,
            islink=linkflag,
            isexec=execflag,
            copysource=renamesource,
        )

    return getfilectx
2765 2722
2766 2723
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        self._rev = None
        self._node = None
        # normalize missing parents (None) to nullid
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1
        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # unmanaged file listed in ``files``: an add
                added.append(f)
            elif self[f]:
                # filectxfn returned a context: a modification
                modified.append(f)
            else:
                # filectxfn returned None: a removal
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

    def parents(self):
        # hide the null second parent from callers
        if self._parents[1].node() == nullid:
            return [self._parents[0]]
        return self._parents
2898 2855
2899 2856
class memfilectx(committablefilectx):
    """An in-memory file, destined to be committed as part of a memctx.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was copied
        in the revision being committed, or None.
        """
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        if islink:
            flags = b'l'
        elif isexec:
            flags = b'x'
        else:
            flags = b''
        self._flags = flags
        self._copysource = copysource

    def copysource(self):
        """Return the recorded copy source path, or None."""
        return self._copysource

    def cmp(self, fctx):
        """Return True if this file's content differs from fctx's."""
        return self.data() != fctx.data()

    def data(self):
        """Return the raw in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite; flags are ignored for in-memory files"""
        self._data = data
2950 2907
2951 2908
class metadataonlyctx(committablectx):
    """A context that reuses the manifest of a different commit.

    Like memctx, but intended for lightweight operations that create
    metadata-only changes (description, user, date, parents) without
    touching any file content.

    Revision information is supplied at initialization time.  'repo' is
    the current localrepo, 'originalctx' is the revision whose manifest
    is being reused, 'parents' is a sequence of two parent revision
    identifiers (pass None for every missing parent), 'text' is the
    commit message.

    'user' receives the committer name and defaults to the current
    repository username; 'date' is the commit date in any format
    supported by dateutil.parsedate() and defaults to the current date;
    'extra' is a dictionary of metadata, or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()

        # Normalize parents into a fresh two-element list of contexts,
        # padding with the null revision as needed.
        if parents is None:
            parents = list(originalctx.parents())
        else:
            parents = [repo[p] for p in parents if p is not None]
        while len(parents) < 2:
            parents.append(repo[nullid])
        self._parents = parents
        p1, p2 = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                "can't reuse the manifest: its p1 doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                "can't reuse the manifest: its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node id of the reused manifest."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        """Delegate file lookups to the original context."""
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Compute an exact status by classifying each file of the
        original context against the parent manifests."""
        man1 = self.p1().manifest()
        # "metadataonlyctx._parents" is explicitly initialized to a list
        # of length two, so the second parent's existence is detected via
        # its node, not via the list length.
        p2 = self._parents[1]
        if p2.node() == nullid:

            def tracked(f):
                return f in man1

        else:
            man2 = p2.manifest()

            def tracked(f):
                return f in man1 or f in man2

        modified = []
        added = []
        removed = []
        for f in self._files:
            if not tracked(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3060 3017
3061 3018
class arbitraryfilectx(object):
    """Filectx-like wrapper for a file at an arbitrary on-disk location,
    possibly outside the working directory.
    """

    def __init__(self, path, repo=None):
        # repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from fctx's."""
        # filecmp follows symlinks whereas `cmp` should not, so the fast
        # path is only taken when neither side is a symlink.
        either_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        if not either_symlink and isinstance(fctx, workingfilectx) and self._repo:
            # Fast path for merges when both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if
            # same) from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        """Return the on-disk path this context wraps."""
        return self._path

    def flags(self):
        """Arbitrary files never carry link/exec flags."""
        return b''

    def data(self):
        """Return the raw file content."""
        return util.readfile(self._path)

    def decodeddata(self):
        """Return the file content read directly from disk."""
        with open(self._path, b"rb") as fp:
            return fp.read()

    def remove(self):
        """Delete the file from disk."""
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        """Write ``data`` to the file on disk; flags are unsupported."""
        assert not flags
        with open(self._path, b"wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now