##// END OF EJS Templates
changectx: mark parents of changesets as non-filtered...
marmoute -
r44568:98349edd default
parent child Browse files
Show More
@@ -1,3054 +1,3057 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from .pycompat import (
27 27 getattr,
28 28 open,
29 29 )
30 30 from . import (
31 31 copies,
32 32 dagop,
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 obsolete as obsmod,
38 38 patch,
39 39 pathutil,
40 40 phases,
41 41 pycompat,
42 42 repoview,
43 43 scmutil,
44 44 sparse,
45 45 subrepo,
46 46 subrepoutil,
47 47 util,
48 48 )
49 49 from .utils import (
50 50 dateutil,
51 51 stringutil,
52 52 )
53 53
54 54 propertycache = util.propertycache
55 55
56 56
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed.

    Subclasses are expected to provide ``_rev``, ``_node`` and
    ``_manifest`` attributes (directly or via propertycache).
    """

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # short hex of the node, e.g. b'98349edd1234'
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different concrete types never compare equal; a
        # missing _rev (e.g. on a partially built context) means "not equal".
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership means "this path exists in the context's manifest"
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] yields a file context for that path
        return self.filectx(key)

    def __iter__(self):
        # iterating a context iterates the file paths in its manifest
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match
105 105
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        ``s`` carries pre-computed deleted/unknown/ignored results; the
        modified/added/removed/clean lists are derived here from a
        manifest diff.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            # files already known to be deleted take precedence over any
            # classification the manifest diff would give them
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
171 171
    @propertycache
    def substate(self):
        """Mapping of subrepo path -> subrepo state for this context."""
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # the recorded revision of the given subrepo in this context
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        """Return the phase of this changeset as a byte string name."""
        return phases.phasenames[self.phase()]

    def mutable(self):
        """True if the changeset's phase allows it to be rewritten."""
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        """Return a matcher for the given fileset expression."""
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()
237 237 def instabilities(self):
238 238 """return the list of instabilities affecting this changeset.
239 239
240 240 Instabilities are returned as strings. possible values are:
241 241 - orphan,
242 242 - phase-divergent,
243 243 - content-divergent.
244 244 """
245 245 instabilities = []
246 246 if self.orphan():
247 247 instabilities.append(b'orphan')
248 248 if self.phasedivergent():
249 249 instabilities.append(b'phase-divergent')
250 250 if self.contentdivergent():
251 251 instabilities.append(b'content-divergent')
252 252 return instabilities
253 253
254 254 def parents(self):
255 255 """return contexts for each parent changeset"""
256 256 return self._parents
257 257
258 258 def p1(self):
259 259 return self._parents[0]
260 260
261 261 def p2(self):
262 262 parents = self._parents
263 263 if len(parents) == 2:
264 264 return parents[1]
265 265 return self._repo[nullrev]
266 266
    def _fileinfo(self, path):
        """Return ``(filenode, flags)`` for ``path`` in this changeset.

        Tries, in order: an already-loaded full manifest, an
        already-loaded manifest delta, and finally a targeted lookup in
        the manifest log. Raises ManifestLookupError if the path is not
        in the manifest.
        """
        if '_manifest' in self.__dict__:
            # full manifest is cached: cheapest lookup
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            # the delta only covers files touched by this changeset
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        """Return the file node for ``path`` in this changeset."""
        return self._fileinfo(path)[0]

    def flags(self, path):
        """Return the flags (b'x', b'l', ...) for ``path``, or b'' if the
        path cannot be looked up."""
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''
299 299
    @propertycache
    def _copies(self):
        # (p1copies, p2copies) computed from filelog metadata
        return copies.computechangesetcopies(self)

    def p1copies(self):
        """Return the dict of copies relative to the first parent."""
        return self._copies[0]

    def p2copies(self):
        """Return the dict of copies relative to the second parent."""
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        # a subrepo object for the null revision, parented at pctx
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)
322 322
    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """Build a matcher for this context from patterns and
        include/exclude lists; ``default`` is the pattern kind assumed for
        bare patterns."""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )
348 348
    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher

        When ``ctx2`` is None, the diff is taken against this context's
        first parent.
        """
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            # accept anything repo[...] can resolve (rev, node, context)
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        """Return the directories present in this context's manifest."""
        return self._manifest.dirs()

    def hasdir(self, dir):
        """True if ``dir`` is a directory in this context's manifest."""
        return self._manifest.hasdir(dir)
385 385
    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            # fold each subrepo's own status into the result, prefixing
            # file names with the subrepo path
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        # callers rely on sorted output
        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r
476 476
477 477
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        # hash by revision number; fall back to identity when _rev is not
        # set yet (mirrors __eq__'s AttributeError handling)
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # the null revision is the only falsy changectx
        return self._rev != nullrev

    __bool__ = __nonzero__
505 505
    @propertycache
    def _changeset(self):
        """Parsed changelog entry for this revision (cached)."""
        if self._maybe_filtered:
            repo = self._repo
        else:
            # known-unfiltered revision: skip the filtered-view overhead
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        # full manifest, read lazily and cached
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # manifest delta against the parent; cheaper than a full read
        return self._manifestctx.readdelta()
525 525
526 526 @propertycache
527 527 def _parents(self):
528 528 repo = self._repo
529 529 if self._maybe_filtered:
530 530 cl = repo.changelog
531 531 else:
532 532 cl = repo.unfiltered().changelog
533 533
534 534 p1, p2 = cl.parentrevs(self._rev)
535 535 if p2 == nullrev:
536 return [repo[p1]]
537 return [repo[p1], repo[p2]]
536 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
537 return [
538 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
539 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
540 ]
538 541
    def changeset(self):
        """Return the raw changeset data as a 6-tuple:
        (manifest, user, date, files, description, extra)."""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        """Files touched by this changeset that were neither added nor
        removed, sorted."""
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)
567 570
    def filesadded(self):
        """Return the list of files added by this changeset.

        Prefers data recorded in the changeset (sidedata or extras,
        depending on config); may fall back to recomputing from filelogs.
        Parallel in structure to filesremoved().
        """
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            # sidedata is authoritative: never recompute
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = copies.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded

    def filesremoved(self):
        """Return the list of files removed by this changeset.

        Same source-selection logic as filesadded().
        """
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = copies.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved
605 608
    @propertycache
    def _copies(self):
        """(p1copies, p2copies) for this changeset, source-selected by
        config; overrides basectx._copies to prefer changeset-recorded
        copy metadata."""
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatiblity mode and there is not data in the
            # changeset), we get the copy metadata from the filelogs.
            #
            # otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                # fall back to the filelog-based computation in basectx
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
640 643
    def description(self):
        return self._changeset.description

    def branch(self):
        # branch name is stored in extra, encoded; convert to local encoding
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        """True if this changeset closes its branch."""
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        """True if this revision is hidden by the 'visible' repo filter."""
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        """Lazily yield a context for each ancestor revision."""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]
691 694
    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset

        ``fileid`` defaults to the file node recorded in this changeset's
        manifest; a pre-opened ``filelog`` may be passed to avoid a reopen.
        """
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
699 702
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # working context: use its first parent instead
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: use the revlog's pick
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)
748 751
    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        """Alias of walk(): yield file names matching ``match``."""
        return self.walk(match)
765 768
766 769
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
    in the repo,
    workingfilectx: a filecontext that represents files from the working
    directory,
    memfilectx: a filecontext that represents files in-memory,

    Subclasses are expected to provide ``_repo``, ``_path`` and either
    ``_fileid``/``_filenode`` or ``_changectx`` attributes.
    """

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changectx' in self.__dict__:
            # we know exactly which changeset we were created from
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            # fall back to the (possibly shadowed) linkrev
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path
805 808
    def __nonzero__(self):
        # truthiness means "the file revision exists"
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # changeset lookup failed: still show the path
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # hash by (path, filenode); identity fallback mirrors __eq__
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
845 848
    def filerev(self):
        return self._filerev

    def filenode(self):
        return self._filenode

    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)

    def flags(self):
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        # the changeset revision this file revision belongs to (may differ
        # from linkrev() when the linkrev is shadowed; see _adjustlinkrev)
        return self._changeid

    def linkrev(self):
        # the raw linkrev recorded in the filelog
        return self._filelog.linkrev(self._filerev)

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        return self._copied

    def copysource(self):
        # the source path if this file was copied, else a falsy value
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        """True if the file content looks binary; missing data counts as
        not binary."""
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False

    def isexec(self):
        return b'x' in self.flags()

    def islink(self):
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
943 946
    # subclasses with their own comparison semantics set this to True so
    # cmp() delegates to them
    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            # fctx is a working-directory file: size shortcuts below may
            # not apply, so compare content where needed
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
974 977
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the linkrev is the revision we started from
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # hit the floor without finding the introduction
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
1027 1030
1028 1031 def isintroducedafter(self, changelogrev):
1029 1032 """True if a filectx has been introduced after a given floor revision
1030 1033 """
1031 1034 if self.linkrev() >= changelogrev:
1032 1035 return True
1033 1036 introrev = self._introrev(stoprev=changelogrev)
1034 1037 if introrev is None:
1035 1038 return False
1036 1039 return introrev >= changelogrev
1037 1040
1038 1041 def introrev(self):
1039 1042 """return the rev of the changeset which introduced this file revision
1040 1043
1041 1044 This method is different from linkrev because it take into account the
1042 1045 changeset the filectx was created from. It ensures the returned
1043 1046 revision is one of its ancestors. This prevents bugs from
1044 1047 'linkrev-shadowing' when a file revision is used by multiple
1045 1048 changesets.
1046 1049 """
1047 1050 return self._introrev()
1048 1051
1049 1052 def _introrev(self, stoprev=None):
1050 1053 """
1051 1054 Same as `introrev` but, with an extra argument to limit changelog
1052 1055 iteration range in some internal usecase.
1053 1056
1054 1057 If `stoprev` is set, the `introrev` will not be searched past that
1055 1058 `stoprev` revision and "None" might be returned. This is useful to
1056 1059 limit the iteration range.
1057 1060 """
1058 1061 toprev = None
1059 1062 attrs = vars(self)
1060 1063 if '_changeid' in attrs:
1061 1064 # We have a cached value already
1062 1065 toprev = self._changeid
1063 1066 elif '_changectx' in attrs:
1064 1067 # We know which changelog entry we are coming from
1065 1068 toprev = self._changectx.rev()
1066 1069
1067 1070 if toprev is not None:
1068 1071 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1069 1072 elif '_descendantrev' in attrs:
1070 1073 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1071 1074 # be nice and cache the result of the computation
1072 1075 if introrev is not None:
1073 1076 self._changeid = introrev
1074 1077 return introrev
1075 1078 else:
1076 1079 return self.linkrev()
1077 1080
1078 1081 def introfilectx(self):
1079 1082 """Return filectx having identical contents, but pointing to the
1080 1083 changeset revision where this filectx was introduced"""
1081 1084 introrev = self.introrev()
1082 1085 if self.rev() == introrev:
1083 1086 return self
1084 1087 return self.filectx(self.filenode(), changeid=introrev)
1085 1088
1086 1089 def _parentfilectx(self, path, fileid, filelog):
1087 1090 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1088 1091 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1089 1092 if '_changeid' in vars(self) or '_changectx' in vars(self):
1090 1093 # If self is associated with a changeset (probably explicitly
1091 1094 # fed), ensure the created filectx is associated with a
1092 1095 # changeset that is an ancestor of self.changectx.
1093 1096 # This lets us later use _adjustlinkrev to get a correct link.
1094 1097 fctx._descendantrev = self.rev()
1095 1098 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1096 1099 elif '_descendantrev' in vars(self):
1097 1100 # Otherwise propagate _descendantrev if we have one associated.
1098 1101 fctx._descendantrev = self._descendantrev
1099 1102 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1100 1103 return fctx
1101 1104
1102 1105 def parents(self):
1103 1106 _path = self._path
1104 1107 fl = self._filelog
1105 1108 parents = self._filelog.parents(self._filenode)
1106 1109 pl = [(_path, node, fl) for node in parents if node != nullid]
1107 1110
1108 1111 r = fl.renamed(self._filenode)
1109 1112 if r:
1110 1113 # - In the simple rename case, both parent are nullid, pl is empty.
1111 1114 # - In case of merge, only one of the parent is null id and should
1112 1115 # be replaced with the rename information. This parent is -always-
1113 1116 # the first one.
1114 1117 #
1115 1118 # As null id have always been filtered out in the previous list
1116 1119 # comprehension, inserting to 0 will always result in "replacing
1117 1120 # first nullid parent with rename information.
1118 1121 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1119 1122
1120 1123 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1121 1124
1122 1125 def p1(self):
1123 1126 return self.parents()[0]
1124 1127
1125 1128 def p2(self):
1126 1129 p = self.parents()
1127 1130 if len(p) == 2:
1128 1131 return p[1]
1129 1132 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1130 1133
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        # memoized filelog lookup shared by all parent computations below
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            # (attribute access is for its caching side effect only)
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )
1180 1183
1181 1184 def ancestors(self, followfirst=False):
1182 1185 visit = {}
1183 1186 c = self
1184 1187 if followfirst:
1185 1188 cut = 1
1186 1189 else:
1187 1190 cut = None
1188 1191
1189 1192 while True:
1190 1193 for parent in c.parents()[:cut]:
1191 1194 visit[(parent.linkrev(), parent.filenode())] = parent
1192 1195 if not visit:
1193 1196 break
1194 1197 c = visit.pop(max(visit))
1195 1198 yield c
1196 1199
1197 1200 def decodeddata(self):
1198 1201 """Returns `data()` after running repository decoding filters.
1199 1202
1200 1203 This is often equivalent to how the data would be expressed on disk.
1201 1204 """
1202 1205 return self._repo.wwritedata(self.path(), self.data())
1203 1206
1204 1207
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one anchor (changeset, file revision, or changectx) is
        # required to resolve this file revision later
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        # remember only what the caller provided; everything else is
        # computed lazily (e.g. _changectx below is a propertycache)
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # changectx for this file revision, resolved from the cached
        # changeid on first access
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """return the raw (revlog-level) data for this file revision"""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """return the file content, honoring the censor policy"""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # the b"ignore" policy maps censored content to an empty file
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """return the size of this file revision as stored in the filelog"""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            # this changeset introduced the file revision: report the copy
            return renamed

        # otherwise report the copy only if neither changeset parent
        # already carries this exact file revision
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        """return filectxs for the child revisions of this file revision
        within the same filelog (children through renames are not found)"""
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1328 1331
1329 1332
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        # not committed yet: no revision number or node id
        self._rev = None
        self._node = None
        self._text = text
        # explicit values override the lazy propertycache defaults below
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        # "<first parent>+" marks an uncommitted context
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # default when no explicit `changes` were given to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        # default when no explicit `user` was given to __init__
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # default when no explicit `date` was given to __init__; the devel
        # config option allows pinning a deterministic date
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # aliases: for a committable context the files* sets are simply the
    # status-derived sets above
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # aggregate the bookmarks of all parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # at least the configured new-commit phase, and never lower than
        # any parent's phase
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # the parents themselves first, then everything above them
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """
        # the base implementation does nothing

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1482 1485
1483 1486
1484 1487 class workingctx(committablectx):
1485 1488 """A workingctx object makes access to data related to
1486 1489 the current working directory convenient.
1487 1490 date - any valid date string or (unixtime, offset), or None.
1488 1491 user - username string, or None.
1489 1492 extra - a dictionary of extra values, or None.
1490 1493 changes - a list of file lists as returned by localrepo.status()
1491 1494 or None to use the repository status.
1492 1495 """
1493 1496
1494 1497 def __init__(
1495 1498 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1496 1499 ):
1497 1500 branch = None
1498 1501 if not extra or b'branch' not in extra:
1499 1502 try:
1500 1503 branch = repo.dirstate.branch()
1501 1504 except UnicodeDecodeError:
1502 1505 raise error.Abort(_(b'branch name not in UTF-8!'))
1503 1506 super(workingctx, self).__init__(
1504 1507 repo, text, user, date, extra, changes, branch=branch
1505 1508 )
1506 1509
1507 1510 def __iter__(self):
1508 1511 d = self._repo.dirstate
1509 1512 for f in d:
1510 1513 if d[f] != b'r':
1511 1514 yield f
1512 1515
1513 1516 def __contains__(self, key):
1514 1517 return self._repo.dirstate[key] not in b"?r"
1515 1518
    def hex(self):
        # the working directory is identified by the magic wdir hex string
        return wdirhex
1518 1521
1519 1522 @propertycache
1520 1523 def _parents(self):
1521 1524 p = self._repo.dirstate.parents()
1522 1525 if p[1] == nullid:
1523 1526 p = p[:-1]
1524 1527 # use unfiltered repo to delay/avoid loading obsmarkers
1525 1528 unfi = self._repo.unfiltered()
1526 1529 return [
1527 1530 changectx(
1528 1531 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1529 1532 )
1530 1533 for n in p
1531 1534 ]
1532 1535
    def setparents(self, p1node, p2node=nullid):
        """Set the working directory parents, adjusting dirstate copy
        records that the dirstate itself cannot fix (it has no access to
        the parent manifests)."""
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == nullid:
                # collapsing to a single parent: drop copy records whose
                # source and destination are both absent from that parent
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)
1549 1552
    def _fileinfo(self, path):
        """Look up *path* via the superclass, after forcing the status
        manifest into existence."""
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1554 1557
    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                # resolve through a pending copy so the flags come from
                # the copy source
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                # 3-way merge of the flag: when the sides agree use that,
                # otherwise the side that changed relative to the ancestor
                # wins
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return b''  # punt for conflicts

        return func
1588 1591
    @propertycache
    def _flagfunc(self):
        # flag reader backed by the dirstate; _buildflagfunc supplies the
        # fallback used when the filesystem cannot report flags (see the
        # comment in _buildflagfunc)
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1592 1595
1593 1596 def flags(self, path):
1594 1597 if '_manifest' in self.__dict__:
1595 1598 try:
1596 1599 return self._manifest.flags(path)
1597 1600 except KeyError:
1598 1601 return b''
1599 1602
1600 1603 try:
1601 1604 return self._flagfunc(path)
1602 1605 except OSError:
1603 1606 return b''
1604 1607
    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        ``filelog`` is simply forwarded to the created workingfilectx.
        """
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
1610 1613
1611 1614 def dirty(self, missing=False, merge=True, branch=True):
1612 1615 """check whether a working directory is modified"""
1613 1616 # check subrepos first
1614 1617 for s in sorted(self.substate):
1615 1618 if self.sub(s).dirty(missing=missing):
1616 1619 return True
1617 1620 # check current working dir
1618 1621 return (
1619 1622 (merge and self.p2())
1620 1623 or (branch and self.branch() != self.p1().branch())
1621 1624 or self.modified()
1622 1625 or self.added()
1623 1626 or self.removed()
1624 1627 or (missing and self.deleted())
1625 1628 )
1626 1629
    def add(self, list, prefix=b""):
        """Schedule the files in *list* for addition to the dirstate.

        Returns the subset of files that could not be added.  Warnings are
        emitted (but the file is still added) for suspiciously large files.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    # warn only; the file is still added below
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif ds[f] in b'amn':
                    # already present in the dirstate in a tracked state
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
                elif ds[f] == b'r':
                    # scheduled for removal: re-adding flips it back
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1671 1674
1672 1675 def forget(self, files, prefix=b""):
1673 1676 with self._repo.wlock():
1674 1677 ds = self._repo.dirstate
1675 1678 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1676 1679 rejected = []
1677 1680 for f in files:
1678 1681 if f not in ds:
1679 1682 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1680 1683 rejected.append(f)
1681 1684 elif ds[f] != b'a':
1682 1685 ds.remove(f)
1683 1686 else:
1684 1687 ds.drop(f)
1685 1688 return rejected
1686 1689
    def copy(self, source, dest):
        """Record in the dirstate that *dest* was copied from *source*.

        Warns and does nothing if *dest* is missing or is not a regular
        file or symlink.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # only a missing destination is tolerated; anything else is
            # re-raised
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                # make sure dest is tracked before recording the copy
                if ds[dest] in b'?':
                    ds.add(dest)
                elif ds[dest] in b'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)
1710 1713
1711 1714 def match(
1712 1715 self,
1713 1716 pats=None,
1714 1717 include=None,
1715 1718 exclude=None,
1716 1719 default=b'glob',
1717 1720 listsubrepos=False,
1718 1721 badfn=None,
1719 1722 cwd=None,
1720 1723 ):
1721 1724 r = self._repo
1722 1725 if not cwd:
1723 1726 cwd = r.getcwd()
1724 1727
1725 1728 # Only a case insensitive filesystem needs magic to translate user input
1726 1729 # to actual case in the filesystem.
1727 1730 icasefs = not util.fscasesensitive(r.root)
1728 1731 return matchmod.match(
1729 1732 r.root,
1730 1733 cwd,
1731 1734 pats,
1732 1735 include,
1733 1736 exclude,
1734 1737 default,
1735 1738 auditor=r.auditor,
1736 1739 ctx=self,
1737 1740 listsubrepos=listsubrepos,
1738 1741 badfn=badfn,
1739 1742 icasefs=icasefs,
1740 1743 )
1741 1744
1742 1745 def _filtersuspectsymlink(self, files):
1743 1746 if not files or self._repo.dirstate._checklink:
1744 1747 return files
1745 1748
1746 1749 # Symlink placeholders may get non-symlink-like contents
1747 1750 # via user error or dereferencing by NFS or Samba servers,
1748 1751 # so we filter out any placeholders that don't look like a
1749 1752 # symlink
1750 1753 sane = []
1751 1754 for f in files:
1752 1755 if self.flags(f) == b'l':
1753 1756 d = self[f].data()
1754 1757 if (
1755 1758 d == b''
1756 1759 or len(d) >= 1024
1757 1760 or b'\n' in d
1758 1761 or stringutil.binary(d)
1759 1762 ):
1760 1763 self._repo.ui.debug(
1761 1764 b'ignoring suspect symlink placeholder "%s"\n' % f
1762 1765 )
1763 1766 continue
1764 1767 sane.append(f)
1765 1768 return sane
1766 1769
    def _checklookup(self, files):
        """Classify possibly-clean *files* by comparing them against the
        first parent.

        Returns a ``(modified, deleted, fixup)`` triple; ``fixup`` lists
        the files that turned out to be clean after all.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1799 1802
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # best effort: somebody else holds the wlock, skip the update
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1840 1843
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # `cmp` holds files the dirstate could not classify from metadata
        # alone; they are resolved by content comparison below
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1873 1876
1874 1877 @propertycache
1875 1878 def _copies(self):
1876 1879 p1copies = {}
1877 1880 p2copies = {}
1878 1881 parents = self._repo.dirstate.parents()
1879 1882 p1manifest = self._repo[parents[0]].manifest()
1880 1883 p2manifest = self._repo[parents[1]].manifest()
1881 1884 changedset = set(self.added()) | set(self.modified())
1882 1885 narrowmatch = self._repo.narrowmatch()
1883 1886 for dst, src in self._repo.dirstate.copies().items():
1884 1887 if dst not in changedset or not narrowmatch(dst):
1885 1888 continue
1886 1889 if src in p1manifest:
1887 1890 p1copies[dst] = src
1888 1891 elif src in p2manifest:
1889 1892 p2copies[dst] = src
1890 1893 return p1copies, p2copies
1891 1894
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        # computed lazily and cached; see _buildstatusmanifest for details
        return self._buildstatusmanifest(self._status)
1902 1905
1903 1906 def _buildstatusmanifest(self, status):
1904 1907 """Builds a manifest that includes the given status results."""
1905 1908 parents = self.parents()
1906 1909
1907 1910 man = parents[0].manifest().copy()
1908 1911
1909 1912 ff = self._flagfunc
1910 1913 for i, l in (
1911 1914 (addednodeid, status.added),
1912 1915 (modifiednodeid, status.modified),
1913 1916 ):
1914 1917 for f in l:
1915 1918 man[f] = i
1916 1919 try:
1917 1920 man.setflag(f, ff(f))
1918 1921 except OSError:
1919 1922 pass
1920 1923
1921 1924 for f in status.deleted + status.removed:
1922 1925 if f in man:
1923 1926 del man[f]
1924 1927
1925 1928 return man
1926 1929
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # the incoming `s` is discarded: status is recomputed from the
        # dirstate here
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # slow path: fall back to the manifest-based comparison
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
1947 1950
1948 1951 def _matchstatus(self, other, match):
1949 1952 """override the match method with a filter for directory patterns
1950 1953
1951 1954 We use inheritance to customize the match.bad method only in cases of
1952 1955 workingctx since it belongs only to the working directory when
1953 1956 comparing against the parent changeset.
1954 1957
1955 1958 If we aren't comparing against the working directory's parent, then we
1956 1959 just use the default match object sent to us.
1957 1960 """
1958 1961 if other != self._repo[b'.']:
1959 1962
1960 1963 def bad(f, msg):
1961 1964 # 'f' may be a directory pattern from 'match.files()',
1962 1965 # so 'f not in ctx1' is not enough
1963 1966 if f not in other and not other.hasdir(f):
1964 1967 self._repo.ui.warn(
1965 1968 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1966 1969 )
1967 1970
1968 1971 match.bad = bad
1969 1972 return match
1970 1973
1971 1974 def walk(self, match):
1972 1975 '''Generates matching file names.'''
1973 1976 return sorted(
1974 1977 self._repo.dirstate.walk(
1975 1978 self._repo.narrowmatch(match),
1976 1979 subrepos=sorted(self.substate),
1977 1980 unknown=True,
1978 1981 ignored=False,
1979 1982 )
1980 1983 )
1981 1984
1982 1985 def matches(self, match):
1983 1986 match = self._repo.narrowmatch(match)
1984 1987 ds = self._repo.dirstate
1985 1988 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1986 1989
    def markcommitted(self, node):
        """Update the dirstate to reflect that this context was committed as
        ``node``: modified/added files become clean ("normal"), removed files
        are dropped, and ``node`` becomes the working directory parent.
        """
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
        # the parent changed, so cached changeid lookups are stale
        self._repo._quick_access_changeid_invalidate()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
2002 2005
2003 2006
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, filenode in p1) when this file is a copy,
        or None otherwise."""
        source = self.copysource()
        if not source:
            return None
        p1manifest = self._changectx._parents[0]._manifest
        return source, p1manifest.get(source, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def nodefor(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # copy: the first parent entry points at the copy source
            entries = [renamed + (None,)]
        else:
            entries = [(path, nodefor(parentctxs[0], path), filelog)]

        entries.extend(
            (path, nodefor(pctx, path), filelog) for pctx in parentctxs[1:]
        )

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in entries
            if n != nullid
        ]

    def children(self):
        return []
2061 2064
2062 2065
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Read the file's current content from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the copy source recorded in the dirstate, if any."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return (mtime, tzoffset); fall back to the changeset's own date
        when the file does not exist on disk."""
        t, tz = self._changectx.date()
        try:
            mtime = self._repo.wvfs.lstat(self._path)[stat.ST_MTIME]
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)
        return (mtime, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        vfs = self._repo.wvfs
        target = self._path
        vfs.audit(target)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if vfs.isdir(target) and not vfs.islink(target):
                vfs.rmtree(target, forcibly=True)
            for parent in reversed(list(pathutil.finddirs(target))):
                if vfs.isfileorlink(parent):
                    vfs.unlink(parent)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if vfs.isdir(target) and not vfs.islink(target):
                vfs.removedirs(target)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
2155 2158
2156 2159
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        """Set the context this overlay applies on top of."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=nullid):
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        """Return the data for ``path``, preferring dirty cache entries over
        the wrapped context."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # cached as existing AND present in the parent -> modified
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # cached as existing but absent from the parent -> added
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # cached as deleted but present in the parent -> removed
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        # NOTE(review): this is identical to p1copies() -- cached copy
        # records are reported against both parents. Confirm this is the
        # intended semantics for in-memory merges.
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                # Use ``path``, not ``self._path``: this class is a
                # changectx, not a filectx, and defines no ``_path``
                # attribute, so the old code raised AttributeError here
                # instead of the intended ProgrammingError.
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                # Use ``path``, not the undefined ``self._path`` (see
                # flags() above for details).
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        """Discard the write-back cache entirely."""
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [self.p1().rev()],
            scmutil.matchfiles(self.repo(), self._cache.keys()),
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )
2557 2560
2558 2561
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(
            repo, path, filelog, parent
        )
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """Return True when this file's data differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        # all reads are delegated to the owning overlayworkingctx
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # nothing to audit for an in-memory file
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is accepted for interface compatibility but
        # meaningless for the in-memory cache, so it is not forwarded
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no on-disk state can conflict with an in-memory write
        pass
2613 2616
2614 2617
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            cleanfiles = [
                f for f in self._manifest if f not in self._changedset
            ]
        else:
            cleanfiles = []
        s = self._status
        return scmutil.status(
            [f for f in s.modified if match(f)],
            [f for f in s.added if match(f)],
            [f for f in s.removed if match(f)],
            [],
            [],
            [],
            cleanfiles,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        s = self._status
        return set(s.modified) | set(s.added) | set(s.removed)
2658 2661
2659 2662
2660 2663 def makecachingfilectxfn(func):
2661 2664 """Create a filectxfn that caches based on the path.
2662 2665
2663 2666 We can't use util.cachefunc because it uses all arguments as the cache
2664 2667 key and this creates a cycle since the arguments include the repo and
2665 2668 memctx.
2666 2669 """
2667 2670 cache = {}
2668 2671
2669 2672 def getfilectx(repo, memctx, path):
2670 2673 if path not in cache:
2671 2674 cache[path] = func(repo, memctx, path)
2672 2675 return cache[path]
2673 2676
2674 2677 return getfilectx
2675 2678
2676 2679
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        source = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=source,
        )

    return getfilectx
2698 2701
2699 2702
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # file was deleted by the patch
            return None
        (islink, isexec) = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2722 2725
2723 2726
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        self._rev = None
        self._node = None
        # replace missing parents with the null node
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # normalize filectxfn to a callable before wrapping it in the cache
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            # the editor may rewrite the message; persist it for recovery
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1
        # a file is: added when no parent manages it; modified when the
        # filectxfn returns content for it; removed otherwise
        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2850 2853
2851 2854
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # symlink takes precedence over the executable bit
        self._flags = b'l' if islink else (b'x' if isexec else b'')
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        """Return True when this file's data differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2902 2905
2903 2906
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        # Default the commit message to the original revision's.
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        # The manifest is reused verbatim from the original revision.
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            # Callers pass node identifiers (None for a missing parent);
            # resolve them to changectx objects and drop the Nones.
            parents = [repo[p] for p in parents if p is not None]
        # Pad with the null changeset so _parents always has two entries.
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        # Let an optional editor callback rewrite the commit message.
        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the reused manifest node id."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # File contents are unchanged, so delegate to the original ctx.
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3012 3015
3013 3016
class arbitraryfilectx(object):
    """Expose filectx-like accessors for a file at an arbitrary location
    on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._path = path
        self._repo = repo

    def cmp(self, fctx):
        """Return True when this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so only take
        # the fast path when neither side is a symlink.
        haslink = b'l' in self.flags() or b'l' in fctx.flags()
        if not haslink and self._repo and isinstance(fctx, workingfilectx):
            # Fast path for merge when both sides are disk-backed. Note
            # that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # An arbitrary on-disk file carries no symlink/exec flags.
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, b"rb") as fp:
            return fp.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, b"wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now