context: use manifest.find() instead of two separate calls...
Augie Fackler
r44829:2e2cfc3b default
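The diff below changes basectx._fileinfo(): when the manifest is already cached on the context, the old code performed two separate lookups (one for the file node, one for its flags), while the new code fetches both with a single manifest.find() call. A minimal sketch of the pattern, simplified from the method body:

    # before: two lookups into the cached manifest
    node = self._manifest[path]
    flags = self._manifest.flags(path)
    return node, flags

    # after: one call that returns the (node, flags) pair
    return self._manifest.find(path)

This mirrors the other branch of _fileinfo(), which already obtained the node and flag together via manifestlog[...].find(path).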
@@ -1,3056 +1,3056 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from .pycompat import (
27 27 getattr,
28 28 open,
29 29 )
30 30 from . import (
31 31 copies,
32 32 dagop,
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 obsolete as obsmod,
38 38 patch,
39 39 pathutil,
40 40 phases,
41 41 pycompat,
42 42 repoview,
43 43 scmutil,
44 44 sparse,
45 45 subrepo,
46 46 subrepoutil,
47 47 util,
48 48 )
49 49 from .utils import (
50 50 dateutil,
51 51 stringutil,
52 52 )
53 53
54 54 propertycache = util.propertycache
55 55
56 56
57 57 class basectx(object):
58 58 """A basectx object represents the common logic for its children:
59 59 changectx: read-only context that is already present in the repo,
60 60 workingctx: a context that represents the working directory and can
61 61 be committed,
62 62 memctx: a context that represents changes in-memory and can also
63 63 be committed."""
64 64
65 65 def __init__(self, repo):
66 66 self._repo = repo
67 67
68 68 def __bytes__(self):
69 69 return short(self.node())
70 70
71 71 __str__ = encoding.strmethod(__bytes__)
72 72
73 73 def __repr__(self):
74 74 return "<%s %s>" % (type(self).__name__, str(self))
75 75
76 76 def __eq__(self, other):
77 77 try:
78 78 return type(self) == type(other) and self._rev == other._rev
79 79 except AttributeError:
80 80 return False
81 81
82 82 def __ne__(self, other):
83 83 return not (self == other)
84 84
85 85 def __contains__(self, key):
86 86 return key in self._manifest
87 87
88 88 def __getitem__(self, key):
89 89 return self.filectx(key)
90 90
91 91 def __iter__(self):
92 92 return iter(self._manifest)
93 93
94 94 def _buildstatusmanifest(self, status):
95 95 """Builds a manifest that includes the given status results, if this is
96 96 a working copy context. For non-working copy contexts, it just returns
97 97 the normal manifest."""
98 98 return self.manifest()
99 99
100 100 def _matchstatus(self, other, match):
101 101 """This internal method provides a way for child objects to override the
102 102 match operator.
103 103 """
104 104 return match
105 105
106 106 def _buildstatus(
107 107 self, other, s, match, listignored, listclean, listunknown
108 108 ):
109 109 """build a status with respect to another context"""
110 110 # Load earliest manifest first for caching reasons. More specifically,
111 111 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 112 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 113 # 1000 and cache it so that when you read 1001, we just need to apply a
114 114 # delta to what's in the cache. So that's one full reconstruction + one
115 115 # delta application.
116 116 mf2 = None
117 117 if self.rev() is not None and self.rev() < other.rev():
118 118 mf2 = self._buildstatusmanifest(s)
119 119 mf1 = other._buildstatusmanifest(s)
120 120 if mf2 is None:
121 121 mf2 = self._buildstatusmanifest(s)
122 122
123 123 modified, added = [], []
124 124 removed = []
125 125 clean = []
126 126 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 127 deletedset = set(deleted)
128 128 d = mf1.diff(mf2, match=match, clean=listclean)
129 129 for fn, value in pycompat.iteritems(d):
130 130 if fn in deletedset:
131 131 continue
132 132 if value is None:
133 133 clean.append(fn)
134 134 continue
135 135 (node1, flag1), (node2, flag2) = value
136 136 if node1 is None:
137 137 added.append(fn)
138 138 elif node2 is None:
139 139 removed.append(fn)
140 140 elif flag1 != flag2:
141 141 modified.append(fn)
142 142 elif node2 not in wdirfilenodeids:
143 143 # When comparing files between two commits, we save time by
144 144 # not comparing the file contents when the nodeids differ.
145 145 # Note that this means we incorrectly report a reverted change
146 146 # to a file as a modification.
147 147 modified.append(fn)
148 148 elif self[fn].cmp(other[fn]):
149 149 modified.append(fn)
150 150 else:
151 151 clean.append(fn)
152 152
153 153 if removed:
154 154 # need to filter files if they are already reported as removed
155 155 unknown = [
156 156 fn
157 157 for fn in unknown
158 158 if fn not in mf1 and (not match or match(fn))
159 159 ]
160 160 ignored = [
161 161 fn
162 162 for fn in ignored
163 163 if fn not in mf1 and (not match or match(fn))
164 164 ]
165 165 # if they're deleted, don't report them as removed
166 166 removed = [fn for fn in removed if fn not in deletedset]
167 167
168 168 return scmutil.status(
169 169 modified, added, removed, deleted, unknown, ignored, clean
170 170 )
171 171
172 172 @propertycache
173 173 def substate(self):
174 174 return subrepoutil.state(self, self._repo.ui)
175 175
176 176 def subrev(self, subpath):
177 177 return self.substate[subpath][1]
178 178
179 179 def rev(self):
180 180 return self._rev
181 181
182 182 def node(self):
183 183 return self._node
184 184
185 185 def hex(self):
186 186 return hex(self.node())
187 187
188 188 def manifest(self):
189 189 return self._manifest
190 190
191 191 def manifestctx(self):
192 192 return self._manifestctx
193 193
194 194 def repo(self):
195 195 return self._repo
196 196
197 197 def phasestr(self):
198 198 return phases.phasenames[self.phase()]
199 199
200 200 def mutable(self):
201 201 return self.phase() > phases.public
202 202
203 203 def matchfileset(self, cwd, expr, badfn=None):
204 204 return fileset.match(self, cwd, expr, badfn=badfn)
205 205
206 206 def obsolete(self):
207 207 """True if the changeset is obsolete"""
208 208 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
209 209
210 210 def extinct(self):
211 211 """True if the changeset is extinct"""
212 212 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
213 213
214 214 def orphan(self):
215 215 """True if the changeset is not obsolete, but its ancestor is"""
216 216 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
217 217
218 218 def phasedivergent(self):
219 219 """True if the changeset tries to be a successor of a public changeset
220 220
221 221 Only non-public and non-obsolete changesets may be phase-divergent.
222 222 """
223 223 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
224 224
225 225 def contentdivergent(self):
226 226 """Is a successor of a changeset with multiple possible successor sets
227 227
228 228 Only non-public and non-obsolete changesets may be content-divergent.
229 229 """
230 230 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
231 231
232 232 def isunstable(self):
233 233 """True if the changeset is either orphan, phase-divergent or
234 234 content-divergent"""
235 235 return self.orphan() or self.phasedivergent() or self.contentdivergent()
236 236
237 237 def instabilities(self):
238 238 """return the list of instabilities affecting this changeset.
239 239
240 240 Instabilities are returned as strings. possible values are:
241 241 - orphan,
242 242 - phase-divergent,
243 243 - content-divergent.
244 244 """
245 245 instabilities = []
246 246 if self.orphan():
247 247 instabilities.append(b'orphan')
248 248 if self.phasedivergent():
249 249 instabilities.append(b'phase-divergent')
250 250 if self.contentdivergent():
251 251 instabilities.append(b'content-divergent')
252 252 return instabilities
253 253
254 254 def parents(self):
255 255 """return contexts for each parent changeset"""
256 256 return self._parents
257 257
258 258 def p1(self):
259 259 return self._parents[0]
260 260
261 261 def p2(self):
262 262 parents = self._parents
263 263 if len(parents) == 2:
264 264 return parents[1]
265 265 return self._repo[nullrev]
266 266
267 267 def _fileinfo(self, path):
268 268 if '_manifest' in self.__dict__:
269 269 try:
270 return self._manifest[path], self._manifest.flags(path)
270 return self._manifest.find(path)
271 271 except KeyError:
272 272 raise error.ManifestLookupError(
273 273 self._node, path, _(b'not found in manifest')
274 274 )
275 275 if '_manifestdelta' in self.__dict__ or path in self.files():
276 276 if path in self._manifestdelta:
277 277 return (
278 278 self._manifestdelta[path],
279 279 self._manifestdelta.flags(path),
280 280 )
281 281 mfl = self._repo.manifestlog
282 282 try:
283 283 node, flag = mfl[self._changeset.manifest].find(path)
284 284 except KeyError:
285 285 raise error.ManifestLookupError(
286 286 self._node, path, _(b'not found in manifest')
287 287 )
288 288
289 289 return node, flag
290 290
291 291 def filenode(self, path):
292 292 return self._fileinfo(path)[0]
293 293
294 294 def flags(self, path):
295 295 try:
296 296 return self._fileinfo(path)[1]
297 297 except error.LookupError:
298 298 return b''
299 299
300 300 @propertycache
301 301 def _copies(self):
302 302 return copies.computechangesetcopies(self)
303 303
304 304 def p1copies(self):
305 305 return self._copies[0]
306 306
307 307 def p2copies(self):
308 308 return self._copies[1]
309 309
310 310 def sub(self, path, allowcreate=True):
311 311 '''return a subrepo for the stored revision of path, never wdir()'''
312 312 return subrepo.subrepo(self, path, allowcreate=allowcreate)
313 313
314 314 def nullsub(self, path, pctx):
315 315 return subrepo.nullsubrepo(self, path, pctx)
316 316
317 317 def workingsub(self, path):
318 318 '''return a subrepo for the stored revision, or wdir if this is a wdir
319 319 context.
320 320 '''
321 321 return subrepo.subrepo(self, path, allowwdir=True)
322 322
323 323 def match(
324 324 self,
325 325 pats=None,
326 326 include=None,
327 327 exclude=None,
328 328 default=b'glob',
329 329 listsubrepos=False,
330 330 badfn=None,
331 331 cwd=None,
332 332 ):
333 333 r = self._repo
334 334 if not cwd:
335 335 cwd = r.getcwd()
336 336 return matchmod.match(
337 337 r.root,
338 338 cwd,
339 339 pats,
340 340 include,
341 341 exclude,
342 342 default,
343 343 auditor=r.nofsauditor,
344 344 ctx=self,
345 345 listsubrepos=listsubrepos,
346 346 badfn=badfn,
347 347 )
348 348
349 349 def diff(
350 350 self,
351 351 ctx2=None,
352 352 match=None,
353 353 changes=None,
354 354 opts=None,
355 355 losedatafn=None,
356 356 pathfn=None,
357 357 copy=None,
358 358 copysourcematch=None,
359 359 hunksfilterfn=None,
360 360 ):
361 361 """Returns a diff generator for the given contexts and matcher"""
362 362 if ctx2 is None:
363 363 ctx2 = self.p1()
364 364 if ctx2 is not None:
365 365 ctx2 = self._repo[ctx2]
366 366 return patch.diff(
367 367 self._repo,
368 368 ctx2,
369 369 self,
370 370 match=match,
371 371 changes=changes,
372 372 opts=opts,
373 373 losedatafn=losedatafn,
374 374 pathfn=pathfn,
375 375 copy=copy,
376 376 copysourcematch=copysourcematch,
377 377 hunksfilterfn=hunksfilterfn,
378 378 )
379 379
380 380 def dirs(self):
381 381 return self._manifest.dirs()
382 382
383 383 def hasdir(self, dir):
384 384 return self._manifest.hasdir(dir)
385 385
386 386 def status(
387 387 self,
388 388 other=None,
389 389 match=None,
390 390 listignored=False,
391 391 listclean=False,
392 392 listunknown=False,
393 393 listsubrepos=False,
394 394 ):
395 395 """return status of files between two nodes or node and working
396 396 directory.
397 397
398 398 If other is None, compare this node with working directory.
399 399
400 400 returns (modified, added, removed, deleted, unknown, ignored, clean)
401 401 """
402 402
403 403 ctx1 = self
404 404 ctx2 = self._repo[other]
405 405
406 406 # This next code block is, admittedly, fragile logic that tests for
407 407 # reversing the contexts and wouldn't need to exist if it weren't for
408 408 # the fast (and common) code path of comparing the working directory
409 409 # with its first parent.
410 410 #
411 411 # What we're aiming for here is the ability to call:
412 412 #
413 413 # workingctx.status(parentctx)
414 414 #
415 415 # If we always built the manifest for each context and compared those,
416 416 # then we'd be done. But the special case of the above call means we
417 417 # just copy the manifest of the parent.
418 418 reversed = False
419 419 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
420 420 reversed = True
421 421 ctx1, ctx2 = ctx2, ctx1
422 422
423 423 match = self._repo.narrowmatch(match)
424 424 match = ctx2._matchstatus(ctx1, match)
425 425 r = scmutil.status([], [], [], [], [], [], [])
426 426 r = ctx2._buildstatus(
427 427 ctx1, r, match, listignored, listclean, listunknown
428 428 )
429 429
430 430 if reversed:
431 431 # Reverse added and removed. Clear deleted, unknown and ignored as
432 432 # these make no sense to reverse.
433 433 r = scmutil.status(
434 434 r.modified, r.removed, r.added, [], [], [], r.clean
435 435 )
436 436
437 437 if listsubrepos:
438 438 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
439 439 try:
440 440 rev2 = ctx2.subrev(subpath)
441 441 except KeyError:
442 442 # A subrepo that existed in node1 was deleted between
443 443 # node1 and node2 (inclusive). Thus, ctx2's substate
444 444 # won't contain that subpath. The best we can do is ignore it.
445 445 rev2 = None
446 446 submatch = matchmod.subdirmatcher(subpath, match)
447 447 s = sub.status(
448 448 rev2,
449 449 match=submatch,
450 450 ignored=listignored,
451 451 clean=listclean,
452 452 unknown=listunknown,
453 453 listsubrepos=True,
454 454 )
455 455 for k in (
456 456 'modified',
457 457 'added',
458 458 'removed',
459 459 'deleted',
460 460 'unknown',
461 461 'ignored',
462 462 'clean',
463 463 ):
464 464 rfiles, sfiles = getattr(r, k), getattr(s, k)
465 465 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
466 466
467 467 r.modified.sort()
468 468 r.added.sort()
469 469 r.removed.sort()
470 470 r.deleted.sort()
471 471 r.unknown.sort()
472 472 r.ignored.sort()
473 473 r.clean.sort()
474 474
475 475 return r
476 476
477 477
478 478 class changectx(basectx):
479 479 """A changecontext object makes access to data related to a particular
480 480 changeset convenient. It represents a read-only context already present in
481 481 the repo."""
482 482
483 483 def __init__(self, repo, rev, node, maybe_filtered=True):
484 484 super(changectx, self).__init__(repo)
485 485 self._rev = rev
486 486 self._node = node
487 487 # When maybe_filtered is True, the revision might be affected by
488 488 # changelog filtering and operation through the filtered changelog must be used.
489 489 #
490 490 # When maybe_filtered is False, the revision has already been checked
491 491 # against filtering and is not filtered. Operation through the
492 492 # unfiltered changelog might be used in some cases.
493 493 self._maybe_filtered = maybe_filtered
494 494
495 495 def __hash__(self):
496 496 try:
497 497 return hash(self._rev)
498 498 except AttributeError:
499 499 return id(self)
500 500
501 501 def __nonzero__(self):
502 502 return self._rev != nullrev
503 503
504 504 __bool__ = __nonzero__
505 505
506 506 @propertycache
507 507 def _changeset(self):
508 508 if self._maybe_filtered:
509 509 repo = self._repo
510 510 else:
511 511 repo = self._repo.unfiltered()
512 512 return repo.changelog.changelogrevision(self.rev())
513 513
514 514 @propertycache
515 515 def _manifest(self):
516 516 return self._manifestctx.read()
517 517
518 518 @property
519 519 def _manifestctx(self):
520 520 return self._repo.manifestlog[self._changeset.manifest]
521 521
522 522 @propertycache
523 523 def _manifestdelta(self):
524 524 return self._manifestctx.readdelta()
525 525
526 526 @propertycache
527 527 def _parents(self):
528 528 repo = self._repo
529 529 if self._maybe_filtered:
530 530 cl = repo.changelog
531 531 else:
532 532 cl = repo.unfiltered().changelog
533 533
534 534 p1, p2 = cl.parentrevs(self._rev)
535 535 if p2 == nullrev:
536 536 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
537 537 return [
538 538 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
539 539 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
540 540 ]
541 541
542 542 def changeset(self):
543 543 c = self._changeset
544 544 return (
545 545 c.manifest,
546 546 c.user,
547 547 c.date,
548 548 c.files,
549 549 c.description,
550 550 c.extra,
551 551 )
552 552
553 553 def manifestnode(self):
554 554 return self._changeset.manifest
555 555
556 556 def user(self):
557 557 return self._changeset.user
558 558
559 559 def date(self):
560 560 return self._changeset.date
561 561
562 562 def files(self):
563 563 return self._changeset.files
564 564
565 565 def filesmodified(self):
566 566 modified = set(self.files())
567 567 modified.difference_update(self.filesadded())
568 568 modified.difference_update(self.filesremoved())
569 569 return sorted(modified)
570 570
571 571 def filesadded(self):
572 572 filesadded = self._changeset.filesadded
573 573 compute_on_none = True
574 574 if self._repo.filecopiesmode == b'changeset-sidedata':
575 575 compute_on_none = False
576 576 else:
577 577 source = self._repo.ui.config(b'experimental', b'copies.read-from')
578 578 if source == b'changeset-only':
579 579 compute_on_none = False
580 580 elif source != b'compatibility':
581 581 # filelog mode, ignore any changelog content
582 582 filesadded = None
583 583 if filesadded is None:
584 584 if compute_on_none:
585 585 filesadded = copies.computechangesetfilesadded(self)
586 586 else:
587 587 filesadded = []
588 588 return filesadded
589 589
590 590 def filesremoved(self):
591 591 filesremoved = self._changeset.filesremoved
592 592 compute_on_none = True
593 593 if self._repo.filecopiesmode == b'changeset-sidedata':
594 594 compute_on_none = False
595 595 else:
596 596 source = self._repo.ui.config(b'experimental', b'copies.read-from')
597 597 if source == b'changeset-only':
598 598 compute_on_none = False
599 599 elif source != b'compatibility':
600 600 # filelog mode, ignore any changelog content
601 601 filesremoved = None
602 602 if filesremoved is None:
603 603 if compute_on_none:
604 604 filesremoved = copies.computechangesetfilesremoved(self)
605 605 else:
606 606 filesremoved = []
607 607 return filesremoved
608 608
609 609 @propertycache
610 610 def _copies(self):
611 611 p1copies = self._changeset.p1copies
612 612 p2copies = self._changeset.p2copies
613 613 compute_on_none = True
614 614 if self._repo.filecopiesmode == b'changeset-sidedata':
615 615 compute_on_none = False
616 616 else:
617 617 source = self._repo.ui.config(b'experimental', b'copies.read-from')
618 618 # If config says to get copy metadata only from changeset, then
619 619 # return that, defaulting to {} if there was no copy metadata. In
620 620 # compatibility mode, we return copy data from the changeset if it
621 621 # was recorded there, and otherwise we fall back to getting it from
622 622 # the filelogs (below).
623 623 #
624 624 # If we are in compatibility mode and there is no data in the
625 625 # changeset, we get the copy metadata from the filelogs.
626 626 #
627 627 # Otherwise, when the config says to read only from the filelog, we
628 628 # get the copy metadata from the filelogs.
629 629 if source == b'changeset-only':
630 630 compute_on_none = False
631 631 elif source != b'compatibility':
632 632 # filelog mode, ignore any changelog content
633 633 p1copies = p2copies = None
634 634 if p1copies is None:
635 635 if compute_on_none:
636 636 p1copies, p2copies = super(changectx, self)._copies
637 637 else:
638 638 if p1copies is None:
639 639 p1copies = {}
640 640 if p2copies is None:
641 641 p2copies = {}
642 642 return p1copies, p2copies
643 643
644 644 def description(self):
645 645 return self._changeset.description
646 646
647 647 def branch(self):
648 648 return encoding.tolocal(self._changeset.extra.get(b"branch"))
649 649
650 650 def closesbranch(self):
651 651 return b'close' in self._changeset.extra
652 652
653 653 def extra(self):
654 654 """Return a dict of extra information."""
655 655 return self._changeset.extra
656 656
657 657 def tags(self):
658 658 """Return a list of byte tag names"""
659 659 return self._repo.nodetags(self._node)
660 660
661 661 def bookmarks(self):
662 662 """Return a list of byte bookmark names."""
663 663 return self._repo.nodebookmarks(self._node)
664 664
665 665 def phase(self):
666 666 return self._repo._phasecache.phase(self._repo, self._rev)
667 667
668 668 def hidden(self):
669 669 return self._rev in repoview.filterrevs(self._repo, b'visible')
670 670
671 671 def isinmemory(self):
672 672 return False
673 673
674 674 def children(self):
675 675 """return list of changectx contexts for each child changeset.
676 676
677 677 This returns only the immediate child changesets. Use descendants() to
678 678 recursively walk children.
679 679 """
680 680 c = self._repo.changelog.children(self._node)
681 681 return [self._repo[x] for x in c]
682 682
683 683 def ancestors(self):
684 684 for a in self._repo.changelog.ancestors([self._rev]):
685 685 yield self._repo[a]
686 686
687 687 def descendants(self):
688 688 """Recursively yield all children of the changeset.
689 689
690 690 For just the immediate children, use children()
691 691 """
692 692 for d in self._repo.changelog.descendants([self._rev]):
693 693 yield self._repo[d]
694 694
695 695 def filectx(self, path, fileid=None, filelog=None):
696 696 """get a file context from this changeset"""
697 697 if fileid is None:
698 698 fileid = self.filenode(path)
699 699 return filectx(
700 700 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
701 701 )
702 702
703 703 def ancestor(self, c2, warn=False):
704 704 """return the "best" ancestor context of self and c2
705 705
706 706 If there are multiple candidates, it will show a message and check
707 707 merge.preferancestor configuration before falling back to the
708 708 revlog ancestor."""
709 709 # deal with workingctxs
710 710 n2 = c2._node
711 711 if n2 is None:
712 712 n2 = c2._parents[0]._node
713 713 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
714 714 if not cahs:
715 715 anc = nullid
716 716 elif len(cahs) == 1:
717 717 anc = cahs[0]
718 718 else:
719 719 # experimental config: merge.preferancestor
720 720 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
721 721 try:
722 722 ctx = scmutil.revsymbol(self._repo, r)
723 723 except error.RepoLookupError:
724 724 continue
725 725 anc = ctx.node()
726 726 if anc in cahs:
727 727 break
728 728 else:
729 729 anc = self._repo.changelog.ancestor(self._node, n2)
730 730 if warn:
731 731 self._repo.ui.status(
732 732 (
733 733 _(b"note: using %s as ancestor of %s and %s\n")
734 734 % (short(anc), short(self._node), short(n2))
735 735 )
736 736 + b''.join(
737 737 _(
738 738 b" alternatively, use --config "
739 739 b"merge.preferancestor=%s\n"
740 740 )
741 741 % short(n)
742 742 for n in sorted(cahs)
743 743 if n != anc
744 744 )
745 745 )
746 746 return self._repo[anc]
747 747
748 748 def isancestorof(self, other):
749 749 """True if this changeset is an ancestor of other"""
750 750 return self._repo.changelog.isancestorrev(self._rev, other._rev)
751 751
752 752 def walk(self, match):
753 753 '''Generates matching file names.'''
754 754
755 755 # Wrap the match.bad method so its message includes the nodeid
756 756 def bad(fn, msg):
757 757 # The manifest doesn't know about subrepos, so don't complain about
758 758 # paths into valid subrepos.
759 759 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
760 760 return
761 761 match.bad(fn, _(b'no such file in rev %s') % self)
762 762
763 763 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
764 764 return self._manifest.walk(m)
765 765
766 766 def matches(self, match):
767 767 return self.walk(match)
768 768
769 769
770 770 class basefilectx(object):
771 771 """A filecontext object represents the common logic for its children:
772 772 filectx: read-only access to a filerevision that is already present
773 773 in the repo,
774 774 workingfilectx: a filecontext that represents files from the working
775 775 directory,
776 776 memfilectx: a filecontext that represents files in-memory,
777 777 """
778 778
779 779 @propertycache
780 780 def _filelog(self):
781 781 return self._repo.file(self._path)
782 782
783 783 @propertycache
784 784 def _changeid(self):
785 785 if '_changectx' in self.__dict__:
786 786 return self._changectx.rev()
787 787 elif '_descendantrev' in self.__dict__:
788 788 # this file context was created from a revision with a known
789 789 # descendant, we can (lazily) correct for linkrev aliases
790 790 return self._adjustlinkrev(self._descendantrev)
791 791 else:
792 792 return self._filelog.linkrev(self._filerev)
793 793
794 794 @propertycache
795 795 def _filenode(self):
796 796 if '_fileid' in self.__dict__:
797 797 return self._filelog.lookup(self._fileid)
798 798 else:
799 799 return self._changectx.filenode(self._path)
800 800
801 801 @propertycache
802 802 def _filerev(self):
803 803 return self._filelog.rev(self._filenode)
804 804
805 805 @propertycache
806 806 def _repopath(self):
807 807 return self._path
808 808
809 809 def __nonzero__(self):
810 810 try:
811 811 self._filenode
812 812 return True
813 813 except error.LookupError:
814 814 # file is missing
815 815 return False
816 816
817 817 __bool__ = __nonzero__
818 818
819 819 def __bytes__(self):
820 820 try:
821 821 return b"%s@%s" % (self.path(), self._changectx)
822 822 except error.LookupError:
823 823 return b"%s@???" % self.path()
824 824
825 825 __str__ = encoding.strmethod(__bytes__)
826 826
827 827 def __repr__(self):
828 828 return "<%s %s>" % (type(self).__name__, str(self))
829 829
830 830 def __hash__(self):
831 831 try:
832 832 return hash((self._path, self._filenode))
833 833 except AttributeError:
834 834 return id(self)
835 835
836 836 def __eq__(self, other):
837 837 try:
838 838 return (
839 839 type(self) == type(other)
840 840 and self._path == other._path
841 841 and self._filenode == other._filenode
842 842 )
843 843 except AttributeError:
844 844 return False
845 845
846 846 def __ne__(self, other):
847 847 return not (self == other)
848 848
849 849 def filerev(self):
850 850 return self._filerev
851 851
852 852 def filenode(self):
853 853 return self._filenode
854 854
855 855 @propertycache
856 856 def _flags(self):
857 857 return self._changectx.flags(self._path)
858 858
859 859 def flags(self):
860 860 return self._flags
861 861
862 862 def filelog(self):
863 863 return self._filelog
864 864
865 865 def rev(self):
866 866 return self._changeid
867 867
868 868 def linkrev(self):
869 869 return self._filelog.linkrev(self._filerev)
870 870
871 871 def node(self):
872 872 return self._changectx.node()
873 873
874 874 def hex(self):
875 875 return self._changectx.hex()
876 876
877 877 def user(self):
878 878 return self._changectx.user()
879 879
880 880 def date(self):
881 881 return self._changectx.date()
882 882
883 883 def files(self):
884 884 return self._changectx.files()
885 885
886 886 def description(self):
887 887 return self._changectx.description()
888 888
889 889 def branch(self):
890 890 return self._changectx.branch()
891 891
892 892 def extra(self):
893 893 return self._changectx.extra()
894 894
895 895 def phase(self):
896 896 return self._changectx.phase()
897 897
898 898 def phasestr(self):
899 899 return self._changectx.phasestr()
900 900
901 901 def obsolete(self):
902 902 return self._changectx.obsolete()
903 903
904 904 def instabilities(self):
905 905 return self._changectx.instabilities()
906 906
907 907 def manifest(self):
908 908 return self._changectx.manifest()
909 909
910 910 def changectx(self):
911 911 return self._changectx
912 912
913 913 def renamed(self):
914 914 return self._copied
915 915
916 916 def copysource(self):
917 917 return self._copied and self._copied[0]
918 918
919 919 def repo(self):
920 920 return self._repo
921 921
922 922 def size(self):
923 923 return len(self.data())
924 924
925 925 def path(self):
926 926 return self._path
927 927
928 928 def isbinary(self):
929 929 try:
930 930 return stringutil.binary(self.data())
931 931 except IOError:
932 932 return False
933 933
934 934 def isexec(self):
935 935 return b'x' in self.flags()
936 936
937 937 def islink(self):
938 938 return b'l' in self.flags()
939 939
940 940 def isabsent(self):
941 941 """whether this filectx represents a file not in self._changectx
942 942
943 943 This is mainly for merge code to detect change/delete conflicts. This is
944 944 expected to be True for all subclasses of basectx."""
945 945 return False
946 946
947 947 _customcmp = False
948 948
949 949 def cmp(self, fctx):
950 950 """compare with other file context
951 951
952 952 returns True if different than fctx.
953 953 """
954 954 if fctx._customcmp:
955 955 return fctx.cmp(self)
956 956
957 957 if self._filenode is None:
958 958 raise error.ProgrammingError(
959 959 b'filectx.cmp() must be reimplemented if not backed by revlog'
960 960 )
961 961
962 962 if fctx._filenode is None:
963 963 if self._repo._encodefilterpats:
964 964 # can't rely on size() because wdir content may be decoded
965 965 return self._filelog.cmp(self._filenode, fctx.data())
966 966 if self.size() - 4 == fctx.size():
967 967 # size() can match:
968 968 # if file data starts with '\1\n', empty metadata block is
969 969 # prepended, which adds 4 bytes to filelog.size().
970 970 return self._filelog.cmp(self._filenode, fctx.data())
971 971 if self.size() == fctx.size():
972 972 # size() matches: need to compare content
973 973 return self._filelog.cmp(self._filenode, fctx.data())
974 974
975 975 # size() differs
976 976 return True
977 977
978 978 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
979 979 """return the first ancestor of <srcrev> introducing <fnode>
980 980
981 981 If the linkrev of the file revision does not point to an ancestor of
982 982 srcrev, we'll walk down the ancestors until we find one introducing
983 983 this file revision.
984 984
985 985 :srcrev: the changeset revision we search ancestors from
986 986 :inclusive: if true, the src revision will also be checked
987 987 :stoprev: an optional revision to stop the walk at. If no introduction
988 988 of this file content could be found before this floor
989 989 revision, the function will return "None" and stop its
990 990 iteration.
991 991 """
992 992 repo = self._repo
993 993 cl = repo.unfiltered().changelog
994 994 mfl = repo.manifestlog
995 995 # fetch the linkrev
996 996 lkr = self.linkrev()
997 997 if srcrev == lkr:
998 998 return lkr
999 999 # hack to reuse ancestor computation when searching for renames
1000 1000 memberanc = getattr(self, '_ancestrycontext', None)
1001 1001 iteranc = None
1002 1002 if srcrev is None:
1003 1003 # wctx case, used by workingfilectx during mergecopy
1004 1004 revs = [p.rev() for p in self._repo[None].parents()]
1005 1005 inclusive = True # we skipped the real (revless) source
1006 1006 else:
1007 1007 revs = [srcrev]
1008 1008 if memberanc is None:
1009 1009 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1010 1010 # check if this linkrev is an ancestor of srcrev
1011 1011 if lkr not in memberanc:
1012 1012 if iteranc is None:
1013 1013 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1014 1014 fnode = self._filenode
1015 1015 path = self._path
1016 1016 for a in iteranc:
1017 1017 if stoprev is not None and a < stoprev:
1018 1018 return None
1019 1019 ac = cl.read(a) # get changeset data (we avoid object creation)
1020 1020 if path in ac[3]: # checking the 'files' field.
1021 1021 # The file has been touched, check if the content is
1022 1022 # similar to the one we search for.
1023 1023 if fnode == mfl[ac[0]].readfast().get(path):
1024 1024 return a
1025 1025 # In theory, we should never get out of that loop without a result.
1026 1026 # But if the manifest uses a buggy file revision (not a child of the
1027 1027 # one it replaces) we could. Such a buggy situation will likely
1028 1028 # result in a crash somewhere else at some point.
1029 1029 return lkr
1030 1030
1031 1031 def isintroducedafter(self, changelogrev):
1032 1032 """True if a filectx has been introduced after a given floor revision
1033 1033 """
1034 1034 if self.linkrev() >= changelogrev:
1035 1035 return True
1036 1036 introrev = self._introrev(stoprev=changelogrev)
1037 1037 if introrev is None:
1038 1038 return False
1039 1039 return introrev >= changelogrev
1040 1040
1041 1041 def introrev(self):
1042 1042 """return the rev of the changeset which introduced this file revision
1043 1043
1044 1044 This method is different from linkrev because it takes into account the
1045 1045 changeset the filectx was created from. It ensures the returned
1046 1046 revision is one of its ancestors. This prevents bugs from
1047 1047 'linkrev-shadowing' when a file revision is used by multiple
1048 1048 changesets.
1049 1049 """
1050 1050 return self._introrev()
1051 1051
1052 1052 def _introrev(self, stoprev=None):
1053 1053 """
1054 1054 Same as `introrev` but with an extra argument to limit changelog
1055 1055 iteration range in some internal use cases.
1056 1056
1057 1057 If `stoprev` is set, the `introrev` will not be searched past that
1058 1058 `stoprev` revision and "None" might be returned. This is useful to
1059 1059 limit the iteration range.
1060 1060 """
1061 1061 toprev = None
1062 1062 attrs = vars(self)
1063 1063 if '_changeid' in attrs:
1064 1064 # We have a cached value already
1065 1065 toprev = self._changeid
1066 1066 elif '_changectx' in attrs:
1067 1067 # We know which changelog entry we are coming from
1068 1068 toprev = self._changectx.rev()
1069 1069
1070 1070 if toprev is not None:
1071 1071 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1072 1072 elif '_descendantrev' in attrs:
1073 1073 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1074 1074 # be nice and cache the result of the computation
1075 1075 if introrev is not None:
1076 1076 self._changeid = introrev
1077 1077 return introrev
1078 1078 else:
1079 1079 return self.linkrev()
1080 1080
1081 1081 def introfilectx(self):
1082 1082 """Return filectx having identical contents, but pointing to the
1083 1083 changeset revision where this filectx was introduced"""
1084 1084 introrev = self.introrev()
1085 1085 if self.rev() == introrev:
1086 1086 return self
1087 1087 return self.filectx(self.filenode(), changeid=introrev)
1088 1088
1089 1089 def _parentfilectx(self, path, fileid, filelog):
1090 1090 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1091 1091 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1092 1092 if '_changeid' in vars(self) or '_changectx' in vars(self):
1093 1093 # If self is associated with a changeset (probably explicitly
1094 1094 # fed), ensure the created filectx is associated with a
1095 1095 # changeset that is an ancestor of self.changectx.
1096 1096 # This lets us later use _adjustlinkrev to get a correct link.
1097 1097 fctx._descendantrev = self.rev()
1098 1098 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1099 1099 elif '_descendantrev' in vars(self):
1100 1100 # Otherwise propagate _descendantrev if we have one associated.
1101 1101 fctx._descendantrev = self._descendantrev
1102 1102 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1103 1103 return fctx
1104 1104
1105 1105 def parents(self):
1106 1106 _path = self._path
1107 1107 fl = self._filelog
1108 1108 parents = self._filelog.parents(self._filenode)
1109 1109 pl = [(_path, node, fl) for node in parents if node != nullid]
1110 1110
1111 1111 r = fl.renamed(self._filenode)
1112 1112 if r:
1113 1113 # - In the simple rename case, both parents are nullid, so pl is empty.
1114 1114 # - In case of merge, only one of the parents is nullid and should
1115 1115 # be replaced with the rename information. This parent is -always-
1116 1116 # the first one.
1117 1117 #
1118 1118 # As nullid parents have always been filtered out in the previous list
1119 1119 # comprehension, inserting at index 0 will always result in replacing
1120 1120 # the first nullid parent with the rename information.
1121 1121 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1122 1122
1123 1123 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1124 1124
1125 1125 def p1(self):
1126 1126 return self.parents()[0]
1127 1127
1128 1128 def p2(self):
1129 1129 p = self.parents()
1130 1130 if len(p) == 2:
1131 1131 return p[1]
1132 1132 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1133 1133
1134 1134 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1135 1135 """Returns a list of annotateline objects for each line in the file
1136 1136
1137 1137 - line.fctx is the filectx of the node where that line was last changed
1138 1138 - line.lineno is the line number at the first appearance in the managed
1139 1139 file
1140 1140 - line.text is the data on that line (including newline character)
1141 1141 """
1142 1142 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1143 1143
1144 1144 def parents(f):
1145 1145 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1146 1146 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1147 1147 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1148 1148 # isn't an ancestor of the srcrev.
1149 1149 f._changeid
1150 1150 pl = f.parents()
1151 1151
1152 1152 # Don't return renamed parents if we aren't following.
1153 1153 if not follow:
1154 1154 pl = [p for p in pl if p.path() == f.path()]
1155 1155
1156 1156 # renamed filectx won't have a filelog yet, so set it
1157 1157 # from the cache to save time
1158 1158 for p in pl:
1159 1159 if not '_filelog' in p.__dict__:
1160 1160 p._filelog = getlog(p.path())
1161 1161
1162 1162 return pl
1163 1163
1164 1164 # use linkrev to find the first changeset where self appeared
1165 1165 base = self.introfilectx()
1166 1166 if getattr(base, '_ancestrycontext', None) is None:
1167 1167 # it is safe to use an unfiltered repository here because we are
1168 1168 # walking ancestors only.
1169 1169 cl = self._repo.unfiltered().changelog
1170 1170 if base.rev() is None:
1171 1171 # wctx is not inclusive, but works because _ancestrycontext
1172 1172 # is used to test filelog revisions
1173 1173 ac = cl.ancestors(
1174 1174 [p.rev() for p in base.parents()], inclusive=True
1175 1175 )
1176 1176 else:
1177 1177 ac = cl.ancestors([base.rev()], inclusive=True)
1178 1178 base._ancestrycontext = ac
1179 1179
1180 1180 return dagop.annotate(
1181 1181 base, parents, skiprevs=skiprevs, diffopts=diffopts
1182 1182 )
1183 1183
1184 1184 def ancestors(self, followfirst=False):
1185 1185 visit = {}
1186 1186 c = self
1187 1187 if followfirst:
1188 1188 cut = 1
1189 1189 else:
1190 1190 cut = None
1191 1191
1192 1192 while True:
1193 1193 for parent in c.parents()[:cut]:
1194 1194 visit[(parent.linkrev(), parent.filenode())] = parent
1195 1195 if not visit:
1196 1196 break
1197 1197 c = visit.pop(max(visit))
1198 1198 yield c
1199 1199
1200 1200 def decodeddata(self):
1201 1201 """Returns `data()` after running repository decoding filters.
1202 1202
1203 1203 This is often equivalent to how the data would be expressed on disk.
1204 1204 """
1205 1205 return self._repo.wwritedata(self.path(), self.data())
1206 1206
1207 1207
1208 1208 class filectx(basefilectx):
1209 1209 """A filecontext object makes access to data related to a particular
1210 1210 filerevision convenient."""
1211 1211
1212 1212 def __init__(
1213 1213 self,
1214 1214 repo,
1215 1215 path,
1216 1216 changeid=None,
1217 1217 fileid=None,
1218 1218 filelog=None,
1219 1219 changectx=None,
1220 1220 ):
1221 1221 """changeid must be a revision number, if specified.
1222 1222 fileid can be a file revision or node."""
1223 1223 self._repo = repo
1224 1224 self._path = path
1225 1225
1226 1226 assert (
1227 1227 changeid is not None or fileid is not None or changectx is not None
1228 1228 ), (
1229 1229 b"bad args: changeid=%r, fileid=%r, changectx=%r"
1230 1230 % (changeid, fileid, changectx,)
1231 1231 )
1232 1232
1233 1233 if filelog is not None:
1234 1234 self._filelog = filelog
1235 1235
1236 1236 if changeid is not None:
1237 1237 self._changeid = changeid
1238 1238 if changectx is not None:
1239 1239 self._changectx = changectx
1240 1240 if fileid is not None:
1241 1241 self._fileid = fileid
1242 1242
1243 1243 @propertycache
1244 1244 def _changectx(self):
1245 1245 try:
1246 1246 return self._repo[self._changeid]
1247 1247 except error.FilteredRepoLookupError:
1248 1248 # Linkrev may point to any revision in the repository. When the
1249 1249 # repository is filtered this may lead to `filectx` trying to build
1250 1250 # `changectx` for a filtered revision. In such a case we fall back to
1251 1251 # creating `changectx` on the unfiltered version of the repository.
1252 1252 # This fallback should not be an issue because `changectx` from
1253 1253 # `filectx` are not used in complex operations that care about
1254 1254 # filtering.
1255 1255 #
1256 1256 # This fallback is a cheap and dirty fix that prevents several
1257 1257 # crashes. It does not ensure the behavior is correct. However the
1258 1258 # behavior was not correct before filtering either and "incorrect
1259 1259 # behavior" is seen as better than "crash"
1260 1260 #
1261 1261 # Linkrevs have several serious troubles with filtering that are
1262 1262 # complicated to solve. Proper handling of the issue here should be
1263 1263 # considered when solving the linkrev issues is on the table.
1264 1264 return self._repo.unfiltered()[self._changeid]
1265 1265
1266 1266 def filectx(self, fileid, changeid=None):
1267 1267 '''opens an arbitrary revision of the file without
1268 1268 opening a new filelog'''
1269 1269 return filectx(
1270 1270 self._repo,
1271 1271 self._path,
1272 1272 fileid=fileid,
1273 1273 filelog=self._filelog,
1274 1274 changeid=changeid,
1275 1275 )
1276 1276
1277 1277 def rawdata(self):
1278 1278 return self._filelog.rawdata(self._filenode)
1279 1279
1280 1280 def rawflags(self):
1281 1281 """low-level revlog flags"""
1282 1282 return self._filelog.flags(self._filerev)
1283 1283
1284 1284 def data(self):
1285 1285 try:
1286 1286 return self._filelog.read(self._filenode)
1287 1287 except error.CensoredNodeError:
1288 1288 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1289 1289 return b""
1290 1290 raise error.Abort(
1291 1291 _(b"censored node: %s") % short(self._filenode),
1292 1292 hint=_(b"set censor.policy to ignore errors"),
1293 1293 )
1294 1294
1295 1295 def size(self):
1296 1296 return self._filelog.size(self._filerev)
1297 1297
1298 1298 @propertycache
1299 1299 def _copied(self):
1300 1300 """check if file was actually renamed in this changeset revision
1301 1301
1302 1302 If a rename is logged in the file revision, we report the copy for the
1303 1303 changeset only if the file revision's linkrev points back to the changeset
1304 1304 in question or both changeset parents contain different file revisions.
1305 1305 """
1306 1306
1307 1307 renamed = self._filelog.renamed(self._filenode)
1308 1308 if not renamed:
1309 1309 return None
1310 1310
1311 1311 if self.rev() == self.linkrev():
1312 1312 return renamed
1313 1313
1314 1314 name = self.path()
1315 1315 fnode = self._filenode
1316 1316 for p in self._changectx.parents():
1317 1317 try:
1318 1318 if fnode == p.filenode(name):
1319 1319 return None
1320 1320 except error.LookupError:
1321 1321 pass
1322 1322 return renamed
1323 1323
1324 1324 def children(self):
1325 1325 # hard for renames
1326 1326 c = self._filelog.children(self._filenode)
1327 1327 return [
1328 1328 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1329 1329 for x in c
1330 1330 ]
1331 1331
1332 1332
1333 1333 class committablectx(basectx):
1334 1334 """A committablectx object provides common functionality for a context that
1335 1335 wants the ability to commit, e.g. workingctx or memctx."""
1336 1336
1337 1337 def __init__(
1338 1338 self,
1339 1339 repo,
1340 1340 text=b"",
1341 1341 user=None,
1342 1342 date=None,
1343 1343 extra=None,
1344 1344 changes=None,
1345 1345 branch=None,
1346 1346 ):
1347 1347 super(committablectx, self).__init__(repo)
1348 1348 self._rev = None
1349 1349 self._node = None
1350 1350 self._text = text
1351 1351 if date:
1352 1352 self._date = dateutil.parsedate(date)
1353 1353 if user:
1354 1354 self._user = user
1355 1355 if changes:
1356 1356 self._status = changes
1357 1357
1358 1358 self._extra = {}
1359 1359 if extra:
1360 1360 self._extra = extra.copy()
1361 1361 if branch is not None:
1362 1362 self._extra[b'branch'] = encoding.fromlocal(branch)
1363 1363 if not self._extra.get(b'branch'):
1364 1364 self._extra[b'branch'] = b'default'
1365 1365
1366 1366 def __bytes__(self):
1367 1367 return bytes(self._parents[0]) + b"+"
1368 1368
1369 1369 __str__ = encoding.strmethod(__bytes__)
1370 1370
1371 1371 def __nonzero__(self):
1372 1372 return True
1373 1373
1374 1374 __bool__ = __nonzero__
1375 1375
1376 1376 @propertycache
1377 1377 def _status(self):
1378 1378 return self._repo.status()
1379 1379
1380 1380 @propertycache
1381 1381 def _user(self):
1382 1382 return self._repo.ui.username()
1383 1383
1384 1384 @propertycache
1385 1385 def _date(self):
1386 1386 ui = self._repo.ui
1387 1387 date = ui.configdate(b'devel', b'default-date')
1388 1388 if date is None:
1389 1389 date = dateutil.makedate()
1390 1390 return date
1391 1391
1392 1392 def subrev(self, subpath):
1393 1393 return None
1394 1394
1395 1395 def manifestnode(self):
1396 1396 return None
1397 1397
1398 1398 def user(self):
1399 1399 return self._user or self._repo.ui.username()
1400 1400
1401 1401 def date(self):
1402 1402 return self._date
1403 1403
1404 1404 def description(self):
1405 1405 return self._text
1406 1406
1407 1407 def files(self):
1408 1408 return sorted(
1409 1409 self._status.modified + self._status.added + self._status.removed
1410 1410 )
1411 1411
1412 1412 def modified(self):
1413 1413 return self._status.modified
1414 1414
1415 1415 def added(self):
1416 1416 return self._status.added
1417 1417
1418 1418 def removed(self):
1419 1419 return self._status.removed
1420 1420
1421 1421 def deleted(self):
1422 1422 return self._status.deleted
1423 1423
1424 1424 filesmodified = modified
1425 1425 filesadded = added
1426 1426 filesremoved = removed
1427 1427
1428 1428 def branch(self):
1429 1429 return encoding.tolocal(self._extra[b'branch'])
1430 1430
1431 1431 def closesbranch(self):
1432 1432 return b'close' in self._extra
1433 1433
1434 1434 def extra(self):
1435 1435 return self._extra
1436 1436
1437 1437 def isinmemory(self):
1438 1438 return False
1439 1439
1440 1440 def tags(self):
1441 1441 return []
1442 1442
1443 1443 def bookmarks(self):
1444 1444 b = []
1445 1445 for p in self.parents():
1446 1446 b.extend(p.bookmarks())
1447 1447 return b
1448 1448
1449 1449 def phase(self):
1450 1450 phase = phases.newcommitphase(self._repo.ui)
1451 1451 for p in self.parents():
1452 1452 phase = max(phase, p.phase())
1453 1453 return phase
1454 1454
1455 1455 def hidden(self):
1456 1456 return False
1457 1457
1458 1458 def children(self):
1459 1459 return []
1460 1460
1461 1461 def ancestor(self, c2):
1462 1462 """return the "best" ancestor context of self and c2"""
1463 1463 return self._parents[0].ancestor(c2) # punt on two parents for now
1464 1464
1465 1465 def ancestors(self):
1466 1466 for p in self._parents:
1467 1467 yield p
1468 1468 for a in self._repo.changelog.ancestors(
1469 1469 [p.rev() for p in self._parents]
1470 1470 ):
1471 1471 yield self._repo[a]
1472 1472
1473 1473 def markcommitted(self, node):
1474 1474 """Perform post-commit cleanup necessary after committing this ctx
1475 1475
1476 1476 Specifically, this updates backing stores this working context
1477 1477 wraps to reflect the fact that the changes reflected by this
1478 1478 workingctx have been committed. For example, it marks
1479 1479 modified and added files as normal in the dirstate.
1480 1480
1481 1481 """
1482 1482
1483 1483 def dirty(self, missing=False, merge=True, branch=True):
1484 1484 return False
1485 1485
1486 1486
1487 1487 class workingctx(committablectx):
1488 1488 """A workingctx object makes access to data related to
1489 1489 the current working directory convenient.
1490 1490 date - any valid date string or (unixtime, offset), or None.
1491 1491 user - username string, or None.
1492 1492 extra - a dictionary of extra values, or None.
1493 1493 changes - a list of file lists as returned by localrepo.status()
1494 1494 or None to use the repository status.
1495 1495 """
1496 1496
1497 1497 def __init__(
1498 1498 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1499 1499 ):
1500 1500 branch = None
1501 1501 if not extra or b'branch' not in extra:
1502 1502 try:
1503 1503 branch = repo.dirstate.branch()
1504 1504 except UnicodeDecodeError:
1505 1505 raise error.Abort(_(b'branch name not in UTF-8!'))
1506 1506 super(workingctx, self).__init__(
1507 1507 repo, text, user, date, extra, changes, branch=branch
1508 1508 )
1509 1509
1510 1510 def __iter__(self):
1511 1511 d = self._repo.dirstate
1512 1512 for f in d:
1513 1513 if d[f] != b'r':
1514 1514 yield f
1515 1515
1516 1516 def __contains__(self, key):
1517 1517 return self._repo.dirstate[key] not in b"?r"
1518 1518
1519 1519 def hex(self):
1520 1520 return wdirhex
1521 1521
1522 1522 @propertycache
1523 1523 def _parents(self):
1524 1524 p = self._repo.dirstate.parents()
1525 1525 if p[1] == nullid:
1526 1526 p = p[:-1]
1527 1527 # use unfiltered repo to delay/avoid loading obsmarkers
1528 1528 unfi = self._repo.unfiltered()
1529 1529 return [
1530 1530 changectx(
1531 1531 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1532 1532 )
1533 1533 for n in p
1534 1534 ]
1535 1535
1536 1536 def setparents(self, p1node, p2node=nullid):
1537 1537 dirstate = self._repo.dirstate
1538 1538 with dirstate.parentchange():
1539 1539 copies = dirstate.setparents(p1node, p2node)
1540 1540 pctx = self._repo[p1node]
1541 1541 if copies:
1542 1542 # Adjust copy records; the dirstate cannot do it, as it
1543 1543 # requires access to the parents' manifests. Preserve them
1544 1544 # only for entries added to the first parent.
1545 1545 for f in copies:
1546 1546 if f not in pctx and copies[f] in pctx:
1547 1547 dirstate.copy(copies[f], f)
1548 1548 if p2node == nullid:
1549 1549 for f, s in sorted(dirstate.copies().items()):
1550 1550 if f not in pctx and s not in pctx:
1551 1551 dirstate.copy(None, f)
1552 1552
1553 1553 def _fileinfo(self, path):
1554 1554 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1555 1555 self._manifest
1556 1556 return super(workingctx, self)._fileinfo(path)
1557 1557
1558 1558 def _buildflagfunc(self):
1559 1559 # Create a fallback function for getting file flags when the
1560 1560 # filesystem doesn't support them
1561 1561
1562 1562 copiesget = self._repo.dirstate.copies().get
1563 1563 parents = self.parents()
1564 1564 if len(parents) < 2:
1565 1565 # when we have one parent, it's easy: copy from parent
1566 1566 man = parents[0].manifest()
1567 1567
1568 1568 def func(f):
1569 1569 f = copiesget(f, f)
1570 1570 return man.flags(f)
1571 1571
1572 1572 else:
1573 1573 # merges are tricky: we try to reconstruct the unstored
1574 1574 # result from the merge (issue1802)
1575 1575 p1, p2 = parents
1576 1576 pa = p1.ancestor(p2)
1577 1577 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1578 1578
1579 1579 def func(f):
1580 1580 f = copiesget(f, f) # may be wrong for merges with copies
1581 1581 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1582 1582 if fl1 == fl2:
1583 1583 return fl1
1584 1584 if fl1 == fla:
1585 1585 return fl2
1586 1586 if fl2 == fla:
1587 1587 return fl1
1588 1588 return b'' # punt for conflicts
1589 1589
1590 1590 return func
1591 1591
1592 1592 @propertycache
1593 1593 def _flagfunc(self):
1594 1594 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1595 1595
1596 1596 def flags(self, path):
1597 1597 if '_manifest' in self.__dict__:
1598 1598 try:
1599 1599 return self._manifest.flags(path)
1600 1600 except KeyError:
1601 1601 return b''
1602 1602
1603 1603 try:
1604 1604 return self._flagfunc(path)
1605 1605 except OSError:
1606 1606 return b''
1607 1607
1608 1608 def filectx(self, path, filelog=None):
1609 1609 """get a file context from the working directory"""
1610 1610 return workingfilectx(
1611 1611 self._repo, path, workingctx=self, filelog=filelog
1612 1612 )
1613 1613
1614 1614 def dirty(self, missing=False, merge=True, branch=True):
1615 1615 """check whether a working directory is modified"""
1616 1616 # check subrepos first
1617 1617 for s in sorted(self.substate):
1618 1618 if self.sub(s).dirty(missing=missing):
1619 1619 return True
1620 1620 # check current working dir
1621 1621 return (
1622 1622 (merge and self.p2())
1623 1623 or (branch and self.branch() != self.p1().branch())
1624 1624 or self.modified()
1625 1625 or self.added()
1626 1626 or self.removed()
1627 1627 or (missing and self.deleted())
1628 1628 )
1629 1629
1630 1630 def add(self, list, prefix=b""):
1631 1631 with self._repo.wlock():
1632 1632 ui, ds = self._repo.ui, self._repo.dirstate
1633 1633 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1634 1634 rejected = []
1635 1635 lstat = self._repo.wvfs.lstat
1636 1636 for f in list:
1637 1637 # ds.pathto() returns an absolute file when this is invoked from
1638 1638 # the keyword extension. That gets flagged as non-portable on
1639 1639 # Windows, since it contains the drive letter and colon.
1640 1640 scmutil.checkportable(ui, os.path.join(prefix, f))
1641 1641 try:
1642 1642 st = lstat(f)
1643 1643 except OSError:
1644 1644 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1645 1645 rejected.append(f)
1646 1646 continue
1647 1647 limit = ui.configbytes(b'ui', b'large-file-limit')
1648 1648 if limit != 0 and st.st_size > limit:
1649 1649 ui.warn(
1650 1650 _(
1651 1651 b"%s: up to %d MB of RAM may be required "
1652 1652 b"to manage this file\n"
1653 1653 b"(use 'hg revert %s' to cancel the "
1654 1654 b"pending addition)\n"
1655 1655 )
1656 1656 % (f, 3 * st.st_size // 1000000, uipath(f))
1657 1657 )
1658 1658 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1659 1659 ui.warn(
1660 1660 _(
1661 1661 b"%s not added: only files and symlinks "
1662 1662 b"supported currently\n"
1663 1663 )
1664 1664 % uipath(f)
1665 1665 )
1666 1666 rejected.append(f)
1667 1667 elif ds[f] in b'amn':
1668 1668 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1669 1669 elif ds[f] == b'r':
1670 1670 ds.normallookup(f)
1671 1671 else:
1672 1672 ds.add(f)
1673 1673 return rejected
1674 1674
1675 1675 def forget(self, files, prefix=b""):
1676 1676 with self._repo.wlock():
1677 1677 ds = self._repo.dirstate
1678 1678 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1679 1679 rejected = []
1680 1680 for f in files:
1681 1681 if f not in ds:
1682 1682 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1683 1683 rejected.append(f)
1684 1684 elif ds[f] != b'a':
1685 1685 ds.remove(f)
1686 1686 else:
1687 1687 ds.drop(f)
1688 1688 return rejected
1689 1689
1690 1690 def copy(self, source, dest):
1691 1691 try:
1692 1692 st = self._repo.wvfs.lstat(dest)
1693 1693 except OSError as err:
1694 1694 if err.errno != errno.ENOENT:
1695 1695 raise
1696 1696 self._repo.ui.warn(
1697 1697 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1698 1698 )
1699 1699 return
1700 1700 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1701 1701 self._repo.ui.warn(
1702 1702 _(b"copy failed: %s is not a file or a symbolic link\n")
1703 1703 % self._repo.dirstate.pathto(dest)
1704 1704 )
1705 1705 else:
1706 1706 with self._repo.wlock():
1707 1707 ds = self._repo.dirstate
1708 1708 if ds[dest] in b'?':
1709 1709 ds.add(dest)
1710 1710 elif ds[dest] in b'r':
1711 1711 ds.normallookup(dest)
1712 1712 ds.copy(source, dest)
1713 1713
1714 1714 def match(
1715 1715 self,
1716 1716 pats=None,
1717 1717 include=None,
1718 1718 exclude=None,
1719 1719 default=b'glob',
1720 1720 listsubrepos=False,
1721 1721 badfn=None,
1722 1722 cwd=None,
1723 1723 ):
1724 1724 r = self._repo
1725 1725 if not cwd:
1726 1726 cwd = r.getcwd()
1727 1727
1728 1728 # Only a case insensitive filesystem needs magic to translate user input
1729 1729 # to actual case in the filesystem.
1730 1730 icasefs = not util.fscasesensitive(r.root)
1731 1731 return matchmod.match(
1732 1732 r.root,
1733 1733 cwd,
1734 1734 pats,
1735 1735 include,
1736 1736 exclude,
1737 1737 default,
1738 1738 auditor=r.auditor,
1739 1739 ctx=self,
1740 1740 listsubrepos=listsubrepos,
1741 1741 badfn=badfn,
1742 1742 icasefs=icasefs,
1743 1743 )
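# A small illustrative sketch of the match() API above (``ctx`` and the
# patterns are assumptions, not part of this module): build a matcher for a
# subset of files and walk the context with it:
#
#     m = ctx.match([b'glob:mercurial/**.py'], exclude=[b'relglob:test_*'])
#     matched = ctx.walk(m)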
1744 1744
1745 1745 def _filtersuspectsymlink(self, files):
1746 1746 if not files or self._repo.dirstate._checklink:
1747 1747 return files
1748 1748
1749 1749 # Symlink placeholders may get non-symlink-like contents
1750 1750 # via user error or dereferencing by NFS or Samba servers,
1751 1751 # so we filter out any placeholders that don't look like a
1752 1752 # symlink
1753 1753 sane = []
1754 1754 for f in files:
1755 1755 if self.flags(f) == b'l':
1756 1756 d = self[f].data()
1757 1757 if (
1758 1758 d == b''
1759 1759 or len(d) >= 1024
1760 1760 or b'\n' in d
1761 1761 or stringutil.binary(d)
1762 1762 ):
1763 1763 self._repo.ui.debug(
1764 1764 b'ignoring suspect symlink placeholder "%s"\n' % f
1765 1765 )
1766 1766 continue
1767 1767 sane.append(f)
1768 1768 return sane
1769 1769
1770 1770 def _checklookup(self, files):
1771 1771 # check for any possibly clean files
1772 1772 if not files:
1773 1773 return [], [], []
1774 1774
1775 1775 modified = []
1776 1776 deleted = []
1777 1777 fixup = []
1778 1778 pctx = self._parents[0]
1779 1779 # do a full compare of any files that might have changed
1780 1780 for f in sorted(files):
1781 1781 try:
1782 1782 # This will return True for a file that got replaced by a
1783 1783 # directory in the interim, but fixing that is pretty hard.
1784 1784 if (
1785 1785 f not in pctx
1786 1786 or self.flags(f) != pctx.flags(f)
1787 1787 or pctx[f].cmp(self[f])
1788 1788 ):
1789 1789 modified.append(f)
1790 1790 else:
1791 1791 fixup.append(f)
1792 1792 except (IOError, OSError):
1793 1793 # A file became inaccessible in between? Mark it as deleted,
1794 1794 # matching dirstate behavior (issue5584).
1795 1795 # The dirstate has more complex behavior around whether a
1796 1796 # missing file matches a directory, etc, but we don't need to
1797 1797 # bother with that: if f has made it to this point, we're sure
1798 1798 # it's in the dirstate.
1799 1799 deleted.append(f)
1800 1800
1801 1801 return modified, deleted, fixup
1802 1802
1803 1803 def _poststatusfixup(self, status, fixup):
1804 1804 """update dirstate for files that are actually clean"""
1805 1805 poststatus = self._repo.postdsstatus()
1806 1806 if fixup or poststatus:
1807 1807 try:
1808 1808 oldid = self._repo.dirstate.identity()
1809 1809
1810 1810 # updating the dirstate is optional
1811 1811 # so we don't wait on the lock
1812 1812 # wlock can invalidate the dirstate, so cache normal _after_
1813 1813 # taking the lock
1814 1814 with self._repo.wlock(False):
1815 1815 if self._repo.dirstate.identity() == oldid:
1816 1816 if fixup:
1817 1817 normal = self._repo.dirstate.normal
1818 1818 for f in fixup:
1819 1819 normal(f)
1820 1820 # write changes out explicitly, because nesting
1821 1821 # wlock at runtime may prevent 'wlock.release()'
1822 1822 # after this block from doing so for subsequent
1823 1823 # changing files
1824 1824 tr = self._repo.currenttransaction()
1825 1825 self._repo.dirstate.write(tr)
1826 1826
1827 1827 if poststatus:
1828 1828 for ps in poststatus:
1829 1829 ps(self, status)
1830 1830 else:
1831 1831 # in this case, writing changes out breaks
1832 1832 # consistency, because .hg/dirstate was
1833 1833 # already changed simultaneously after last
1834 1834 # caching (see also issue5584 for detail)
1835 1835 self._repo.ui.debug(
1836 1836 b'skip updating dirstate: identity mismatch\n'
1837 1837 )
1838 1838 except error.LockError:
1839 1839 pass
1840 1840 finally:
1841 1841 # Even if the wlock couldn't be grabbed, clear out the list.
1842 1842 self._repo.clearpostdsstatus()
1843 1843
1844 1844 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1845 1845 '''Gets the status from the dirstate -- internal use only.'''
1846 1846 subrepos = []
1847 1847 if b'.hgsub' in self:
1848 1848 subrepos = sorted(self.substate)
1849 1849 cmp, s = self._repo.dirstate.status(
1850 1850 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1851 1851 )
1852 1852
1853 1853 # check for any possibly clean files
1854 1854 fixup = []
1855 1855 if cmp:
1856 1856 modified2, deleted2, fixup = self._checklookup(cmp)
1857 1857 s.modified.extend(modified2)
1858 1858 s.deleted.extend(deleted2)
1859 1859
1860 1860 if fixup and clean:
1861 1861 s.clean.extend(fixup)
1862 1862
1863 1863 self._poststatusfixup(s, fixup)
1864 1864
1865 1865 if match.always():
1866 1866 # cache for performance
1867 1867 if s.unknown or s.ignored or s.clean:
1868 1868 # "_status" is cached with list*=False in the normal route
1869 1869 self._status = scmutil.status(
1870 1870 s.modified, s.added, s.removed, s.deleted, [], [], []
1871 1871 )
1872 1872 else:
1873 1873 self._status = s
1874 1874
1875 1875 return s
1876 1876
1877 1877 @propertycache
1878 1878 def _copies(self):
1879 1879 p1copies = {}
1880 1880 p2copies = {}
1881 1881 parents = self._repo.dirstate.parents()
1882 1882 p1manifest = self._repo[parents[0]].manifest()
1883 1883 p2manifest = self._repo[parents[1]].manifest()
1884 1884 changedset = set(self.added()) | set(self.modified())
1885 1885 narrowmatch = self._repo.narrowmatch()
1886 1886 for dst, src in self._repo.dirstate.copies().items():
1887 1887 if dst not in changedset or not narrowmatch(dst):
1888 1888 continue
1889 1889 if src in p1manifest:
1890 1890 p1copies[dst] = src
1891 1891 elif src in p2manifest:
1892 1892 p2copies[dst] = src
1893 1893 return p1copies, p2copies
1894 1894
1895 1895 @propertycache
1896 1896 def _manifest(self):
1897 1897 """generate a manifest corresponding to the values in self._status
1898 1898
1899 1899 This reuses the file nodeid from the parent, but we use special node
1900 1900 identifiers for added and modified files. This is used by manifest
1901 1901 merge to see that files are different and by update logic to avoid
1902 1902 deleting newly added files.
1903 1903 """
1904 1904 return self._buildstatusmanifest(self._status)
1905 1905
1906 1906 def _buildstatusmanifest(self, status):
1907 1907 """Builds a manifest that includes the given status results."""
1908 1908 parents = self.parents()
1909 1909
1910 1910 man = parents[0].manifest().copy()
1911 1911
1912 1912 ff = self._flagfunc
1913 1913 for i, l in (
1914 1914 (addednodeid, status.added),
1915 1915 (modifiednodeid, status.modified),
1916 1916 ):
1917 1917 for f in l:
1918 1918 man[f] = i
1919 1919 try:
1920 1920 man.setflag(f, ff(f))
1921 1921 except OSError:
1922 1922 pass
1923 1923
1924 1924 for f in status.deleted + status.removed:
1925 1925 if f in man:
1926 1926 del man[f]
1927 1927
1928 1928 return man
1929 1929
1930 1930 def _buildstatus(
1931 1931 self, other, s, match, listignored, listclean, listunknown
1932 1932 ):
1933 1933 """build a status with respect to another context
1934 1934
1935 1935 This includes logic for maintaining the fast path of status when
1936 1936 comparing the working directory against its parent: building a new
1937 1937 manifest is skipped unless self (the working directory) is compared
1938 1938 against something other than its parent (repo['.']).
1939 1939 """
1940 1940 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1941 1941 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1942 1942 # might have accidentally ended up with the entire contents of the file
1943 1943 # they are supposed to be linking to.
1944 1944 s.modified[:] = self._filtersuspectsymlink(s.modified)
1945 1945 if other != self._repo[b'.']:
1946 1946 s = super(workingctx, self)._buildstatus(
1947 1947 other, s, match, listignored, listclean, listunknown
1948 1948 )
1949 1949 return s
1950 1950
1951 1951 def _matchstatus(self, other, match):
1952 1952 """override the match method with a filter for directory patterns
1953 1953
1954 1954 We use inheritance to customize the match.bad method only in the case
1955 1955 of workingctx, since it applies only to the working directory when
1956 1956 comparing against the parent changeset.
1957 1957
1958 1958 If we aren't comparing against the working directory's parent, then we
1959 1959 just use the default match object sent to us.
1960 1960 """
1961 1961 if other != self._repo[b'.']:
1962 1962
1963 1963 def bad(f, msg):
1964 1964 # 'f' may be a directory pattern from 'match.files()',
1965 1965 # so 'f not in ctx1' is not enough
1966 1966 if f not in other and not other.hasdir(f):
1967 1967 self._repo.ui.warn(
1968 1968 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1969 1969 )
1970 1970
1971 1971 match.bad = bad
1972 1972 return match
1973 1973
1974 1974 def walk(self, match):
1975 1975 '''Generates matching file names.'''
1976 1976 return sorted(
1977 1977 self._repo.dirstate.walk(
1978 1978 self._repo.narrowmatch(match),
1979 1979 subrepos=sorted(self.substate),
1980 1980 unknown=True,
1981 1981 ignored=False,
1982 1982 )
1983 1983 )
1984 1984
1985 1985 def matches(self, match):
1986 1986 match = self._repo.narrowmatch(match)
1987 1987 ds = self._repo.dirstate
1988 1988 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1989 1989
1990 1990 def markcommitted(self, node):
1991 1991 with self._repo.dirstate.parentchange():
1992 1992 for f in self.modified() + self.added():
1993 1993 self._repo.dirstate.normal(f)
1994 1994 for f in self.removed():
1995 1995 self._repo.dirstate.drop(f)
1996 1996 self._repo.dirstate.setparents(node)
1997 1997 self._repo._quick_access_changeid_invalidate()
1998 1998
1999 1999 # write changes out explicitly, because nesting wlock at
2000 2000 # runtime may prevent 'wlock.release()' in 'repo.commit()'
2001 2001 # from immediately doing so for subsequent changing files
2002 2002 self._repo.dirstate.write(self._repo.currenttransaction())
2003 2003
2004 2004 sparse.aftercommit(self._repo, node)
2005 2005
2006 2006
2007 2007 class committablefilectx(basefilectx):
2008 2008 """A committablefilectx provides common functionality for a file context
2009 2009 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
2010 2010
2011 2011 def __init__(self, repo, path, filelog=None, ctx=None):
2012 2012 self._repo = repo
2013 2013 self._path = path
2014 2014 self._changeid = None
2015 2015 self._filerev = self._filenode = None
2016 2016
2017 2017 if filelog is not None:
2018 2018 self._filelog = filelog
2019 2019 if ctx:
2020 2020 self._changectx = ctx
2021 2021
2022 2022 def __nonzero__(self):
2023 2023 return True
2024 2024
2025 2025 __bool__ = __nonzero__
2026 2026
2027 2027 def linkrev(self):
2028 2028 # linked to self._changectx no matter if file is modified or not
2029 2029 return self.rev()
2030 2030
2031 2031 def renamed(self):
2032 2032 path = self.copysource()
2033 2033 if not path:
2034 2034 return None
2035 2035 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2036 2036
2037 2037 def parents(self):
2038 2038 '''return parent filectxs, following copies if necessary'''
2039 2039
2040 2040 def filenode(ctx, path):
2041 2041 return ctx._manifest.get(path, nullid)
2042 2042
2043 2043 path = self._path
2044 2044 fl = self._filelog
2045 2045 pcl = self._changectx._parents
2046 2046 renamed = self.renamed()
2047 2047
2048 2048 if renamed:
2049 2049 pl = [renamed + (None,)]
2050 2050 else:
2051 2051 pl = [(path, filenode(pcl[0], path), fl)]
2052 2052
2053 2053 for pc in pcl[1:]:
2054 2054 pl.append((path, filenode(pc, path), fl))
2055 2055
2056 2056 return [
2057 2057 self._parentfilectx(p, fileid=n, filelog=l)
2058 2058 for p, n, l in pl
2059 2059 if n != nullid
2060 2060 ]
2061 2061
2062 2062 def children(self):
2063 2063 return []
2064 2064
2065 2065
2066 2066 class workingfilectx(committablefilectx):
2067 2067 """A workingfilectx object makes access to data related to a particular
2068 2068 file in the working directory convenient."""
2069 2069
2070 2070 def __init__(self, repo, path, filelog=None, workingctx=None):
2071 2071 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2072 2072
2073 2073 @propertycache
2074 2074 def _changectx(self):
2075 2075 return workingctx(self._repo)
2076 2076
2077 2077 def data(self):
2078 2078 return self._repo.wread(self._path)
2079 2079
2080 2080 def copysource(self):
2081 2081 return self._repo.dirstate.copied(self._path)
2082 2082
2083 2083 def size(self):
2084 2084 return self._repo.wvfs.lstat(self._path).st_size
2085 2085
2086 2086 def lstat(self):
2087 2087 return self._repo.wvfs.lstat(self._path)
2088 2088
2089 2089 def date(self):
2090 2090 t, tz = self._changectx.date()
2091 2091 try:
2092 2092 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2093 2093 except OSError as err:
2094 2094 if err.errno != errno.ENOENT:
2095 2095 raise
2096 2096 return (t, tz)
2097 2097
2098 2098 def exists(self):
2099 2099 return self._repo.wvfs.exists(self._path)
2100 2100
2101 2101 def lexists(self):
2102 2102 return self._repo.wvfs.lexists(self._path)
2103 2103
2104 2104 def audit(self):
2105 2105 return self._repo.wvfs.audit(self._path)
2106 2106
2107 2107 def cmp(self, fctx):
2108 2108 """compare with other file context
2109 2109
2110 2110 returns True if different than fctx.
2111 2111 """
2112 2112 # fctx should be a filectx (not a workingfilectx)
2113 2113 # invert comparison to reuse the same code path
2114 2114 return fctx.cmp(self)
2115 2115
2116 2116 def remove(self, ignoremissing=False):
2117 2117 """wraps unlink for a repo's working directory"""
2118 2118 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2119 2119 self._repo.wvfs.unlinkpath(
2120 2120 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2121 2121 )
2122 2122
2123 2123 def write(self, data, flags, backgroundclose=False, **kwargs):
2124 2124 """wraps repo.wwrite"""
2125 2125 return self._repo.wwrite(
2126 2126 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2127 2127 )
2128 2128
2129 2129 def markcopied(self, src):
2130 2130 """marks this file a copy of `src`"""
2131 2131 self._repo.dirstate.copy(src, self._path)
2132 2132
2133 2133 def clearunknown(self):
2134 2134 """Removes conflicting items in the working directory so that
2135 2135 ``write()`` can be called successfully.
2136 2136 """
2137 2137 wvfs = self._repo.wvfs
2138 2138 f = self._path
2139 2139 wvfs.audit(f)
2140 2140 if self._repo.ui.configbool(
2141 2141 b'experimental', b'merge.checkpathconflicts'
2142 2142 ):
2143 2143 # remove files under the directory as they should already be
2144 2144 # warned and backed up
2145 2145 if wvfs.isdir(f) and not wvfs.islink(f):
2146 2146 wvfs.rmtree(f, forcibly=True)
2147 2147 for p in reversed(list(pathutil.finddirs(f))):
2148 2148 if wvfs.isfileorlink(p):
2149 2149 wvfs.unlink(p)
2150 2150 break
2151 2151 else:
2152 2152 # don't remove files if path conflicts are not processed
2153 2153 if wvfs.isdir(f) and not wvfs.islink(f):
2154 2154 wvfs.removedirs(f)
2155 2155
2156 2156 def setflags(self, l, x):
2157 2157 self._repo.wvfs.setflags(self._path, l, x)
2158 2158
2159 2159
2160 2160 class overlayworkingctx(committablectx):
2161 2161 """Wraps another mutable context with a write-back cache that can be
2162 2162 converted into a commit context.
2163 2163
2164 2164 self._cache[path] maps to a dict with keys: {
2165 2165 'exists': bool?
2166 2166 'date': date?
2167 2167 'data': str?
2168 2168 'flags': str?
2169 2169 'copied': str? (path or None)
2170 2170 }
2171 2171 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2172 2172 is `False`, the file was deleted.
2173 2173 """
2174 2174
2175 2175 def __init__(self, repo):
2176 2176 super(overlayworkingctx, self).__init__(repo)
2177 2177 self.clean()
2178 2178
2179 2179 def setbase(self, wrappedctx):
2180 2180 self._wrappedctx = wrappedctx
2181 2181 self._parents = [wrappedctx]
2182 2182 # Drop old manifest cache as it is now out of date.
2183 2183 # This is necessary when, e.g., rebasing several nodes with one
2184 2184 # ``overlayworkingctx`` (e.g. with --collapse).
2185 2185 util.clearcachedproperty(self, b'_manifest')
2186 2186
2187 2187 def setparents(self, p1node, p2node=nullid):
2188 2188 assert p1node == self._wrappedctx.node()
2189 2189 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2190 2190
2191 2191 def data(self, path):
2192 2192 if self.isdirty(path):
2193 2193 if self._cache[path][b'exists']:
2194 2194 if self._cache[path][b'data'] is not None:
2195 2195 return self._cache[path][b'data']
2196 2196 else:
2197 2197 # Must fallback here, too, because we only set flags.
2198 2198 return self._wrappedctx[path].data()
2199 2199 else:
2200 2200 raise error.ProgrammingError(
2201 2201 b"No such file or directory: %s" % path
2202 2202 )
2203 2203 else:
2204 2204 return self._wrappedctx[path].data()
2205 2205
2206 2206 @propertycache
2207 2207 def _manifest(self):
2208 2208 parents = self.parents()
2209 2209 man = parents[0].manifest().copy()
2210 2210
2211 2211 flag = self._flagfunc
2212 2212 for path in self.added():
2213 2213 man[path] = addednodeid
2214 2214 man.setflag(path, flag(path))
2215 2215 for path in self.modified():
2216 2216 man[path] = modifiednodeid
2217 2217 man.setflag(path, flag(path))
2218 2218 for path in self.removed():
2219 2219 del man[path]
2220 2220 return man
2221 2221
2222 2222 @propertycache
2223 2223 def _flagfunc(self):
2224 2224 def f(path):
2225 2225 return self._cache[path][b'flags']
2226 2226
2227 2227 return f
2228 2228
2229 2229 def files(self):
2230 2230 return sorted(self.added() + self.modified() + self.removed())
2231 2231
2232 2232 def modified(self):
2233 2233 return [
2234 2234 f
2235 2235 for f in self._cache.keys()
2236 2236 if self._cache[f][b'exists'] and self._existsinparent(f)
2237 2237 ]
2238 2238
2239 2239 def added(self):
2240 2240 return [
2241 2241 f
2242 2242 for f in self._cache.keys()
2243 2243 if self._cache[f][b'exists'] and not self._existsinparent(f)
2244 2244 ]
2245 2245
2246 2246 def removed(self):
2247 2247 return [
2248 2248 f
2249 2249 for f in self._cache.keys()
2250 2250 if not self._cache[f][b'exists'] and self._existsinparent(f)
2251 2251 ]
2252 2252
2253 2253 def p1copies(self):
2254 2254 copies = {}
2255 2255 narrowmatch = self._repo.narrowmatch()
2256 2256 for f in self._cache.keys():
2257 2257 if not narrowmatch(f):
2258 2258 continue
2259 2259 copies.pop(f, None) # delete if it exists
2260 2260 source = self._cache[f][b'copied']
2261 2261 if source:
2262 2262 copies[f] = source
2263 2263 return copies
2264 2264
2265 2265 def p2copies(self):
2266 2266 copies = {}
2267 2267 narrowmatch = self._repo.narrowmatch()
2268 2268 for f in self._cache.keys():
2269 2269 if not narrowmatch(f):
2270 2270 continue
2271 2271 copies.pop(f, None) # delete if it exists
2272 2272 source = self._cache[f][b'copied']
2273 2273 if source:
2274 2274 copies[f] = source
2275 2275 return copies
2276 2276
2277 2277 def isinmemory(self):
2278 2278 return True
2279 2279
2280 2280 def filedate(self, path):
2281 2281 if self.isdirty(path):
2282 2282 return self._cache[path][b'date']
2283 2283 else:
2284 2284 return self._wrappedctx[path].date()
2285 2285
2286 2286 def markcopied(self, path, origin):
2287 2287 self._markdirty(
2288 2288 path,
2289 2289 exists=True,
2290 2290 date=self.filedate(path),
2291 2291 flags=self.flags(path),
2292 2292 copied=origin,
2293 2293 )
2294 2294
2295 2295 def copydata(self, path):
2296 2296 if self.isdirty(path):
2297 2297 return self._cache[path][b'copied']
2298 2298 else:
2299 2299 return None
2300 2300
2301 2301 def flags(self, path):
2302 2302 if self.isdirty(path):
2303 2303 if self._cache[path][b'exists']:
2304 2304 return self._cache[path][b'flags']
2305 2305 else:
2306 2306 raise error.ProgrammingError(
2307 2307 b"No such file or directory: %s" % self._path
2308 2308 )
2309 2309 else:
2310 2310 return self._wrappedctx[path].flags()
2311 2311
2312 2312 def __contains__(self, key):
2313 2313 if key in self._cache:
2314 2314 return self._cache[key][b'exists']
2315 2315 return key in self.p1()
2316 2316
2317 2317 def _existsinparent(self, path):
2318 2318 try:
2319 2319 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
2320 2320 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2321 2321 # with an ``exists()`` function.
2322 2322 self._wrappedctx[path]
2323 2323 return True
2324 2324 except error.ManifestLookupError:
2325 2325 return False
2326 2326
2327 2327 def _auditconflicts(self, path):
2328 2328 """Replicates conflict checks done by wvfs.write().
2329 2329
2330 2330 Since we never write to the filesystem and never call `applyupdates` in
2331 2331 IMM, we'll never check that a path is actually writable -- e.g., because
2332 2332 it adds `a/foo`, but `a` is actually a file in the other commit.
2333 2333 """
2334 2334
2335 2335 def fail(path, component):
2336 2336 # p1() is the base and we're receiving "writes" for p2()'s
2337 2337 # files.
2338 2338 if b'l' in self.p1()[component].flags():
2339 2339 raise error.Abort(
2340 2340 b"error: %s conflicts with symlink %s "
2341 2341 b"in %d." % (path, component, self.p1().rev())
2342 2342 )
2343 2343 else:
2344 2344 raise error.Abort(
2345 2345 b"error: '%s' conflicts with file '%s' in "
2346 2346 b"%d." % (path, component, self.p1().rev())
2347 2347 )
2348 2348
2349 2349 # Test that each new directory to be created to write this path from p2
2350 2350 # is not a file in p1.
2351 2351 components = path.split(b'/')
2352 2352 for i in pycompat.xrange(len(components)):
2353 2353 component = b"/".join(components[0:i])
2354 2354 if component in self:
2355 2355 fail(path, component)
2356 2356
2357 2357 # Test the other direction -- that this path from p2 isn't a directory
2358 2358 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2359 2359 match = self.match([path], default=b'path')
2360 2360 mfiles = list(self.p1().manifest().walk(match))
2361 2361 if len(mfiles) > 0:
2362 2362 if len(mfiles) == 1 and mfiles[0] == path:
2363 2363 return
2364 2364 # omit the files which are deleted in current IMM wctx
2365 2365 mfiles = [m for m in mfiles if m in self]
2366 2366 if not mfiles:
2367 2367 return
2368 2368 raise error.Abort(
2369 2369 b"error: file '%s' cannot be written because "
2370 2370 b" '%s/' is a directory in %s (containing %d "
2371 2371 b"entries: %s)"
2372 2372 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2373 2373 )
2374 2374
2375 2375 def write(self, path, data, flags=b'', **kwargs):
2376 2376 if data is None:
2377 2377 raise error.ProgrammingError(b"data must be non-None")
2378 2378 self._auditconflicts(path)
2379 2379 self._markdirty(
2380 2380 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2381 2381 )
2382 2382
2383 2383 def setflags(self, path, l, x):
2384 2384 flag = b''
2385 2385 if l:
2386 2386 flag = b'l'
2387 2387 elif x:
2388 2388 flag = b'x'
2389 2389 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2390 2390
2391 2391 def remove(self, path):
2392 2392 self._markdirty(path, exists=False)
2393 2393
2394 2394 def exists(self, path):
2395 2395 """exists behaves like `lexists`, but needs to follow symlinks and
2396 2396 return False if they are broken.
2397 2397 """
2398 2398 if self.isdirty(path):
2399 2399 # If this path exists and is a symlink, "follow" it by calling
2400 2400 # exists on the destination path.
2401 2401 if (
2402 2402 self._cache[path][b'exists']
2403 2403 and b'l' in self._cache[path][b'flags']
2404 2404 ):
2405 2405 return self.exists(self._cache[path][b'data'].strip())
2406 2406 else:
2407 2407 return self._cache[path][b'exists']
2408 2408
2409 2409 return self._existsinparent(path)
2410 2410
2411 2411 def lexists(self, path):
2412 2412 """lexists returns True if the path exists"""
2413 2413 if self.isdirty(path):
2414 2414 return self._cache[path][b'exists']
2415 2415
2416 2416 return self._existsinparent(path)
2417 2417
2418 2418 def size(self, path):
2419 2419 if self.isdirty(path):
2420 2420 if self._cache[path][b'exists']:
2421 2421 return len(self._cache[path][b'data'])
2422 2422 else:
2423 2423 raise error.ProgrammingError(
2424 2424 b"No such file or directory: %s" % self._path
2425 2425 )
2426 2426 return self._wrappedctx[path].size()
2427 2427
2428 2428 def tomemctx(
2429 2429 self,
2430 2430 text,
2431 2431 branch=None,
2432 2432 extra=None,
2433 2433 date=None,
2434 2434 parents=None,
2435 2435 user=None,
2436 2436 editor=None,
2437 2437 ):
2438 2438 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2439 2439 committed.
2440 2440
2441 2441 ``text`` is the commit message.
2442 2442 ``parents`` (optional) are rev numbers.
2443 2443 """
2444 2444 # Default parents to the wrapped context if not passed.
2445 2445 if parents is None:
2446 2446 parents = self.parents()
2447 2447 if len(parents) == 1:
2448 2448 parents = (parents[0], None)
2449 2449
2450 2450 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2451 2451 if parents[1] is None:
2452 2452 parents = (self._repo[parents[0]], None)
2453 2453 else:
2454 2454 parents = (self._repo[parents[0]], self._repo[parents[1]])
2455 2455
2456 2456 files = self.files()
2457 2457
2458 2458 def getfile(repo, memctx, path):
2459 2459 if self._cache[path][b'exists']:
2460 2460 return memfilectx(
2461 2461 repo,
2462 2462 memctx,
2463 2463 path,
2464 2464 self._cache[path][b'data'],
2465 2465 b'l' in self._cache[path][b'flags'],
2466 2466 b'x' in self._cache[path][b'flags'],
2467 2467 self._cache[path][b'copied'],
2468 2468 )
2469 2469 else:
2470 2470 # Returning None, but including the path in `files`, is
2471 2471 # necessary for memctx to register a deletion.
2472 2472 return None
2473 2473
2474 2474 if branch is None:
2475 2475 branch = self._wrappedctx.branch()
2476 2476
2477 2477 return memctx(
2478 2478 self._repo,
2479 2479 parents,
2480 2480 text,
2481 2481 files,
2482 2482 getfile,
2483 2483 date=date,
2484 2484 extra=extra,
2485 2485 user=user,
2486 2486 branch=branch,
2487 2487 editor=editor,
2488 2488 )
2489 2489
2490 2490 def isdirty(self, path):
2491 2491 return path in self._cache
2492 2492
2493 2493 def isempty(self):
2494 2494 # We need to discard any keys that are actually clean before the empty
2495 2495 # commit check.
2496 2496 self._compact()
2497 2497 return len(self._cache) == 0
2498 2498
2499 2499 def clean(self):
2500 2500 self._cache = {}
2501 2501
2502 2502 def _compact(self):
2503 2503 """Removes keys from the cache that are actually clean, by comparing
2504 2504 them with the underlying context.
2505 2505
2506 2506 This can occur during the merge process, e.g. by passing --tool :local
2507 2507 to resolve a conflict.
2508 2508 """
2509 2509 keys = []
2510 2510 # This won't be perfect, but can help performance significantly when
2511 2511 # using things like remotefilelog.
2512 2512 scmutil.prefetchfiles(
2513 2513 self.repo(),
2514 2514 [self.p1().rev()],
2515 2515 scmutil.matchfiles(self.repo(), self._cache.keys()),
2516 2516 )
2517 2517
2518 2518 for path in self._cache.keys():
2519 2519 cache = self._cache[path]
2520 2520 try:
2521 2521 underlying = self._wrappedctx[path]
2522 2522 if (
2523 2523 underlying.data() == cache[b'data']
2524 2524 and underlying.flags() == cache[b'flags']
2525 2525 ):
2526 2526 keys.append(path)
2527 2527 except error.ManifestLookupError:
2528 2528 # Path not in the underlying manifest (created).
2529 2529 continue
2530 2530
2531 2531 for path in keys:
2532 2532 del self._cache[path]
2533 2533 return keys
2534 2534
2535 2535 def _markdirty(
2536 2536 self, path, exists, data=None, date=None, flags=b'', copied=None
2537 2537 ):
2538 2538 # data not provided, let's see if we already have some; if not, let's
2539 2539 # grab it from our underlying context, so that we always have data if
2540 2540 # the file is marked as existing.
2541 2541 if exists and data is None:
2542 2542 oldentry = self._cache.get(path) or {}
2543 2543 data = oldentry.get(b'data')
2544 2544 if data is None:
2545 2545 data = self._wrappedctx[path].data()
2546 2546
2547 2547 self._cache[path] = {
2548 2548 b'exists': exists,
2549 2549 b'data': data,
2550 2550 b'date': date,
2551 2551 b'flags': flags,
2552 2552 b'copied': copied,
2553 2553 }
2554 2554
2555 2555 def filectx(self, path, filelog=None):
2556 2556 return overlayworkingfilectx(
2557 2557 self._repo, path, parent=self, filelog=filelog
2558 2558 )
2559 2559
2560 2560
2561 2561 class overlayworkingfilectx(committablefilectx):
2562 2562 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2563 2563 cache, which can be flushed through later by calling ``flush()``."""
2564 2564
2565 2565 def __init__(self, repo, path, filelog=None, parent=None):
2566 2566 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2567 2567 self._repo = repo
2568 2568 self._parent = parent
2569 2569 self._path = path
2570 2570
2571 2571 def cmp(self, fctx):
2572 2572 return self.data() != fctx.data()
2573 2573
2574 2574 def changectx(self):
2575 2575 return self._parent
2576 2576
2577 2577 def data(self):
2578 2578 return self._parent.data(self._path)
2579 2579
2580 2580 def date(self):
2581 2581 return self._parent.filedate(self._path)
2582 2582
2583 2583 def exists(self):
2584 2584 return self.lexists()
2585 2585
2586 2586 def lexists(self):
2587 2587 return self._parent.exists(self._path)
2588 2588
2589 2589 def copysource(self):
2590 2590 return self._parent.copydata(self._path)
2591 2591
2592 2592 def size(self):
2593 2593 return self._parent.size(self._path)
2594 2594
2595 2595 def markcopied(self, origin):
2596 2596 self._parent.markcopied(self._path, origin)
2597 2597
2598 2598 def audit(self):
2599 2599 pass
2600 2600
2601 2601 def flags(self):
2602 2602 return self._parent.flags(self._path)
2603 2603
2604 2604 def setflags(self, islink, isexec):
2605 2605 return self._parent.setflags(self._path, islink, isexec)
2606 2606
2607 2607 def write(self, data, flags, backgroundclose=False, **kwargs):
2608 2608 return self._parent.write(self._path, data, flags, **kwargs)
2609 2609
2610 2610 def remove(self, ignoremissing=False):
2611 2611 return self._parent.remove(self._path)
2612 2612
2613 2613 def clearunknown(self):
2614 2614 pass
2615 2615
2616 2616
2617 2617 class workingcommitctx(workingctx):
2618 2618 """A workingcommitctx object makes access to data related to
2619 2619 the revision being committed convenient.
2620 2620
2621 2621 This hides changes in the working directory, if they aren't
2622 2622 committed in this context.
2623 2623 """
2624 2624
2625 2625 def __init__(
2626 2626 self, repo, changes, text=b"", user=None, date=None, extra=None
2627 2627 ):
2628 2628 super(workingcommitctx, self).__init__(
2629 2629 repo, text, user, date, extra, changes
2630 2630 )
2631 2631
2632 2632 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2633 2633 """Return matched files only in ``self._status``
2634 2634
2635 2635 Uncommitted files appear "clean" via this context, even if
2636 2636 they aren't actually so in the working directory.
2637 2637 """
2638 2638 if clean:
2639 2639 clean = [f for f in self._manifest if f not in self._changedset]
2640 2640 else:
2641 2641 clean = []
2642 2642 return scmutil.status(
2643 2643 [f for f in self._status.modified if match(f)],
2644 2644 [f for f in self._status.added if match(f)],
2645 2645 [f for f in self._status.removed if match(f)],
2646 2646 [],
2647 2647 [],
2648 2648 [],
2649 2649 clean,
2650 2650 )
2651 2651
2652 2652 @propertycache
2653 2653 def _changedset(self):
2654 2654 """Return the set of files changed in this context
2655 2655 """
2656 2656 changed = set(self._status.modified)
2657 2657 changed.update(self._status.added)
2658 2658 changed.update(self._status.removed)
2659 2659 return changed
2660 2660
2661 2661
2662 2662 def makecachingfilectxfn(func):
2663 2663 """Create a filectxfn that caches based on the path.
2664 2664
2665 2665 We can't use util.cachefunc because it uses all arguments as the cache
2666 2666 key and this creates a cycle since the arguments include the repo and
2667 2667 memctx.
2668 2668 """
2669 2669 cache = {}
2670 2670
2671 2671 def getfilectx(repo, memctx, path):
2672 2672 if path not in cache:
2673 2673 cache[path] = func(repo, memctx, path)
2674 2674 return cache[path]
2675 2675
2676 2676 return getfilectx
2677 2677
2678 2678
2679 2679 def memfilefromctx(ctx):
2680 2680 """Given a context return a memfilectx for ctx[path]
2681 2681
2682 2682 This is a convenience method for building a memctx based on another
2683 2683 context.
2684 2684 """
2685 2685
2686 2686 def getfilectx(repo, memctx, path):
2687 2687 fctx = ctx[path]
2688 2688 copysource = fctx.copysource()
2689 2689 return memfilectx(
2690 2690 repo,
2691 2691 memctx,
2692 2692 path,
2693 2693 fctx.data(),
2694 2694 islink=fctx.islink(),
2695 2695 isexec=fctx.isexec(),
2696 2696 copysource=copysource,
2697 2697 )
2698 2698
2699 2699 return getfilectx
2700 2700
2701 2701
2702 2702 def memfilefrompatch(patchstore):
2703 2703 """Given a patch (e.g. patchstore object) return a memfilectx
2704 2704
2705 2705 This is a convenience method for building a memctx based on a patchstore.
2706 2706 """
2707 2707
2708 2708 def getfilectx(repo, memctx, path):
2709 2709 data, mode, copysource = patchstore.getfile(path)
2710 2710 if data is None:
2711 2711 return None
2712 2712 islink, isexec = mode
2713 2713 return memfilectx(
2714 2714 repo,
2715 2715 memctx,
2716 2716 path,
2717 2717 data,
2718 2718 islink=islink,
2719 2719 isexec=isexec,
2720 2720 copysource=copysource,
2721 2721 )
2722 2722
2723 2723 return getfilectx
2724 2724
2725 2725
2726 2726 class memctx(committablectx):
2727 2727 """Use memctx to perform in-memory commits via localrepo.commitctx().
2728 2728
2729 2729 Revision information is supplied at initialization time, while
2730 2730 related file data is made available through a callback
2731 2731 mechanism. 'repo' is the current localrepo, 'parents' is a
2732 2732 sequence of two parent revision identifiers (pass None for every
2733 2733 missing parent), 'text' is the commit message and 'files' lists the
2734 2734 names of files touched by the revision (normalized and relative to
2735 2735 the repository root).
2736 2736
2737 2737 filectxfn(repo, memctx, path) is a callable receiving the
2738 2738 repository, the current memctx object and the normalized path of
2739 2739 requested file, relative to repository root. It is fired by the
2740 2740 commit function for every file in 'files', but calls order is
2741 2741 undefined. If the file is available in the revision being
2742 2742 committed (updated or added), filectxfn returns a memfilectx
2743 2743 object. If the file was removed, filectxfn returns None for recent
2744 2744 Mercurial. Moved files are represented by marking the source file
2745 2745 removed and the new file added with copy information (see
2746 2746 memfilectx).
2747 2747
2748 2748 user receives the committer name and defaults to current
2749 2749 repository username, date is the commit date in any format
2750 2750 supported by dateutil.parsedate() and defaults to current date, extra
2751 2751 is a dictionary of metadata or is left empty.
2752 2752 """
2753 2753
2754 2754 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2755 2755 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2756 2756 # this field to determine what to do in filectxfn.
2757 2757 _returnnoneformissingfiles = True
2758 2758
2759 2759 def __init__(
2760 2760 self,
2761 2761 repo,
2762 2762 parents,
2763 2763 text,
2764 2764 files,
2765 2765 filectxfn,
2766 2766 user=None,
2767 2767 date=None,
2768 2768 extra=None,
2769 2769 branch=None,
2770 2770 editor=None,
2771 2771 ):
2772 2772 super(memctx, self).__init__(
2773 2773 repo, text, user, date, extra, branch=branch
2774 2774 )
2775 2775 self._rev = None
2776 2776 self._node = None
2777 2777 parents = [(p or nullid) for p in parents]
2778 2778 p1, p2 = parents
2779 2779 self._parents = [self._repo[p] for p in (p1, p2)]
2780 2780 files = sorted(set(files))
2781 2781 self._files = files
2782 2782 self.substate = {}
2783 2783
2784 2784 if isinstance(filectxfn, patch.filestore):
2785 2785 filectxfn = memfilefrompatch(filectxfn)
2786 2786 elif not callable(filectxfn):
2787 2787 # if store is not callable, wrap it in a function
2788 2788 filectxfn = memfilefromctx(filectxfn)
2789 2789
2790 2790 # memoizing increases performance for e.g. vcs convert scenarios.
2791 2791 self._filectxfn = makecachingfilectxfn(filectxfn)
2792 2792
2793 2793 if editor:
2794 2794 self._text = editor(self._repo, self, [])
2795 2795 self._repo.savecommitmessage(self._text)
2796 2796
2797 2797 def filectx(self, path, filelog=None):
2798 2798 """get a file context from the working directory
2799 2799
2800 2800 Returns None if file doesn't exist and should be removed."""
2801 2801 return self._filectxfn(self._repo, self, path)
2802 2802
2803 2803 def commit(self):
2804 2804 """commit context to the repo"""
2805 2805 return self._repo.commitctx(self)
2806 2806
2807 2807 @propertycache
2808 2808 def _manifest(self):
2809 2809 """generate a manifest based on the return values of filectxfn"""
2810 2810
2811 2811 # keep this simple for now; just worry about p1
2812 2812 pctx = self._parents[0]
2813 2813 man = pctx.manifest().copy()
2814 2814
2815 2815 for f in self._status.modified:
2816 2816 man[f] = modifiednodeid
2817 2817
2818 2818 for f in self._status.added:
2819 2819 man[f] = addednodeid
2820 2820
2821 2821 for f in self._status.removed:
2822 2822 if f in man:
2823 2823 del man[f]
2824 2824
2825 2825 return man
2826 2826
2827 2827 @propertycache
2828 2828 def _status(self):
2829 2829 """Calculate exact status from ``files`` specified at construction
2830 2830 """
2831 2831 man1 = self.p1().manifest()
2832 2832 p2 = self._parents[1]
2833 2833 # "1 < len(self._parents)" can't be used for checking
2834 2834 # existence of the 2nd parent, because "memctx._parents" is
2835 2835 # explicitly initialized with a list whose length is always 2.
2836 2836 if p2.node() != nullid:
2837 2837 man2 = p2.manifest()
2838 2838 managing = lambda f: f in man1 or f in man2
2839 2839 else:
2840 2840 managing = lambda f: f in man1
2841 2841
2842 2842 modified, added, removed = [], [], []
2843 2843 for f in self._files:
2844 2844 if not managing(f):
2845 2845 added.append(f)
2846 2846 elif self[f]:
2847 2847 modified.append(f)
2848 2848 else:
2849 2849 removed.append(f)
2850 2850
2851 2851 return scmutil.status(modified, added, removed, [], [], [], [])
2852 2852
2853 2853
2854 2854 class memfilectx(committablefilectx):
2855 2855 """memfilectx represents an in-memory file to commit.
2856 2856
2857 2857 See memctx and committablefilectx for more details.
2858 2858 """
2859 2859
2860 2860 def __init__(
2861 2861 self,
2862 2862 repo,
2863 2863 changectx,
2864 2864 path,
2865 2865 data,
2866 2866 islink=False,
2867 2867 isexec=False,
2868 2868 copysource=None,
2869 2869 ):
2870 2870 """
2871 2871 path is the normalized file path relative to repository root.
2872 2872 data is the file content as a string.
2873 2873 islink is True if the file is a symbolic link.
2874 2874 isexec is True if the file is executable.
2875 2875 copysource is the source file path if the current file was copied in the
2876 2876 revision being committed, or None."""
2877 2877 super(memfilectx, self).__init__(repo, path, None, changectx)
2878 2878 self._data = data
2879 2879 if islink:
2880 2880 self._flags = b'l'
2881 2881 elif isexec:
2882 2882 self._flags = b'x'
2883 2883 else:
2884 2884 self._flags = b''
2885 2885 self._copysource = copysource
2886 2886
2887 2887 def copysource(self):
2888 2888 return self._copysource
2889 2889
2890 2890 def cmp(self, fctx):
2891 2891 return self.data() != fctx.data()
2892 2892
2893 2893 def data(self):
2894 2894 return self._data
2895 2895
2896 2896 def remove(self, ignoremissing=False):
2897 2897 """wraps unlink for a repo's working directory"""
2898 2898 # need to figure out what to do here
2899 2899 del self._changectx[self._path]
2900 2900
2901 2901 def write(self, data, flags, **kwargs):
2902 2902 """wraps repo.wwrite"""
2903 2903 self._data = data
2904 2904
2905 2905
2906 2906 class metadataonlyctx(committablectx):
2907 2907 """Like memctx but it's reusing the manifest of different commit.
2908 2908 Intended to be used by lightweight operations that are creating
2909 2909 metadata-only changes.
2910 2910
2911 2911 Revision information is supplied at initialization time. 'repo' is the
2912 2912 current localrepo, 'originalctx' is the revision whose manifest we're reusing,
2913 2913 'parents' is a sequence of two parent revision identifiers (pass None for
2914 2914 every missing parent), 'text' is the commit message.
2915 2915
2916 2916 user receives the committer name and defaults to current repository
2917 2917 username, date is the commit date in any format supported by
2918 2918 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2919 2919 metadata or is left empty.
2920 2920 """
2921 2921
2922 2922 def __init__(
2923 2923 self,
2924 2924 repo,
2925 2925 originalctx,
2926 2926 parents=None,
2927 2927 text=None,
2928 2928 user=None,
2929 2929 date=None,
2930 2930 extra=None,
2931 2931 editor=None,
2932 2932 ):
2933 2933 if text is None:
2934 2934 text = originalctx.description()
2935 2935 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2936 2936 self._rev = None
2937 2937 self._node = None
2938 2938 self._originalctx = originalctx
2939 2939 self._manifestnode = originalctx.manifestnode()
2940 2940 if parents is None:
2941 2941 parents = originalctx.parents()
2942 2942 else:
2943 2943 parents = [repo[p] for p in parents if p is not None]
2944 2944 parents = parents[:]
2945 2945 while len(parents) < 2:
2946 2946 parents.append(repo[nullid])
2947 2947 p1, p2 = self._parents = parents
2948 2948
2949 2949 # sanity check to ensure that the reused manifest parents are
2950 2950 # manifests of our commit parents
2951 2951 mp1, mp2 = self.manifestctx().parents
2952 2952 if p1 != nullid and p1.manifestnode() != mp1:
2953 2953 raise RuntimeError(
2954 2954 r"can't reuse the manifest: its p1 "
2955 2955 r"doesn't match the new ctx p1"
2956 2956 )
2957 2957 if p2 != nullid and p2.manifestnode() != mp2:
2958 2958 raise RuntimeError(
2959 2959 r"can't reuse the manifest: "
2960 2960 r"its p2 doesn't match the new ctx p2"
2961 2961 )
2962 2962
2963 2963 self._files = originalctx.files()
2964 2964 self.substate = {}
2965 2965
2966 2966 if editor:
2967 2967 self._text = editor(self._repo, self, [])
2968 2968 self._repo.savecommitmessage(self._text)
2969 2969
2970 2970 def manifestnode(self):
2971 2971 return self._manifestnode
2972 2972
2973 2973 @property
2974 2974 def _manifestctx(self):
2975 2975 return self._repo.manifestlog[self._manifestnode]
2976 2976
2977 2977 def filectx(self, path, filelog=None):
2978 2978 return self._originalctx.filectx(path, filelog=filelog)
2979 2979
2980 2980 def commit(self):
2981 2981 """commit context to the repo"""
2982 2982 return self._repo.commitctx(self)
2983 2983
2984 2984 @property
2985 2985 def _manifest(self):
2986 2986 return self._originalctx.manifest()
2987 2987
2988 2988 @propertycache
2989 2989 def _status(self):
2990 2990 """Calculate exact status from ``files`` specified in the ``origctx``
2991 2991 and parents manifests.
2992 2992 """
2993 2993 man1 = self.p1().manifest()
2994 2994 p2 = self._parents[1]
2995 2995 # "1 < len(self._parents)" can't be used for checking
2996 2996 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2997 2997 # explicitly initialized with a list whose length is always 2.
2998 2998 if p2.node() != nullid:
2999 2999 man2 = p2.manifest()
3000 3000 managing = lambda f: f in man1 or f in man2
3001 3001 else:
3002 3002 managing = lambda f: f in man1
3003 3003
3004 3004 modified, added, removed = [], [], []
3005 3005 for f in self._files:
3006 3006 if not managing(f):
3007 3007 added.append(f)
3008 3008 elif f in self:
3009 3009 modified.append(f)
3010 3010 else:
3011 3011 removed.append(f)
3012 3012
3013 3013 return scmutil.status(modified, added, removed, [], [], [], [])
3014 3014
3015 3015
3016 3016 class arbitraryfilectx(object):
3017 3017 """Allows you to use filectx-like functions on a file in an arbitrary
3018 3018 location on disk, possibly not in the working directory.
3019 3019 """
3020 3020
3021 3021 def __init__(self, path, repo=None):
3022 3022 # Repo is optional because contrib/simplemerge uses this class.
3023 3023 self._repo = repo
3024 3024 self._path = path
3025 3025
3026 3026 def cmp(self, fctx):
3027 3027 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3028 3028 # path if either side is a symlink.
3029 3029 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3030 3030 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3031 3031 # Add a fast-path for merge if both sides are disk-backed.
3032 3032 # Note that filecmp uses the opposite return values (True if same)
3033 3033 # from our cmp functions (True if different).
3034 3034 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3035 3035 return self.data() != fctx.data()
3036 3036
3037 3037 def path(self):
3038 3038 return self._path
3039 3039
3040 3040 def flags(self):
3041 3041 return b''
3042 3042
3043 3043 def data(self):
3044 3044 return util.readfile(self._path)
3045 3045
3046 3046 def decodeddata(self):
3047 3047 with open(self._path, b"rb") as f:
3048 3048 return f.read()
3049 3049
3050 3050 def remove(self):
3051 3051 util.unlink(self._path)
3052 3052
3053 3053 def write(self, data, flags, **kwargs):
3054 3054 assert not flags
3055 3055 with open(self._path, b"wb") as f:
3056 3056 f.write(data)