##// END OF EJS Templates
context: fix creation of ProgrammingError to not use non-existent field...
Martin von Zweigbergk -
r45460:b2e5ec0c default
parent child Browse files
Show More
@@ -1,3085 +1,3085 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from .pycompat import (
27 27 getattr,
28 28 open,
29 29 )
30 30 from . import (
31 31 copies,
32 32 dagop,
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 mergestate as mergestatemod,
38 38 obsolete as obsmod,
39 39 patch,
40 40 pathutil,
41 41 phases,
42 42 pycompat,
43 43 repoview,
44 44 scmutil,
45 45 sparse,
46 46 subrepo,
47 47 subrepoutil,
48 48 util,
49 49 )
50 50 from .utils import (
51 51 dateutil,
52 52 stringutil,
53 53 )
54 54
55 55 propertycache = util.propertycache
56 56
57 57
58 58 class basectx(object):
59 59 """A basectx object represents the common logic for its children:
60 60 changectx: read-only context that is already present in the repo,
61 61 workingctx: a context that represents the working directory and can
62 62 be committed,
63 63 memctx: a context that represents changes in-memory and can also
64 64 be committed."""
65 65
    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # short hex form of the changeset node
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts are equal when they have the same concrete type and refer
        # to the same revision number.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership test on file paths in this context's manifest
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> file context for that path
        return self.filectx(key)

    def __iter__(self):
        # iterate over the file names in the manifest
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match
106 106
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        ``s`` is a pre-computed status carrying the deleted/unknown/ignored
        sets; the modified/added/removed/clean buckets are derived here from
        a manifest diff between the two contexts.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        # diff maps file name -> ((node1, flag1), (node2, flag2)), or None
        # for clean files (only present when listclean is set)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
172 172
    @propertycache
    def substate(self):
        # subrepo state for this context (see subrepoutil.state)
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # revision recorded for the given subrepo path
        return self.substate[subpath][1]

    def rev(self):
        """Return the revision number of this context."""
        return self._rev

    def node(self):
        """Return the binary node id of this context."""
        return self._node

    def hex(self):
        """Return the hex-encoded node id."""
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        """Return the phase as a human-readable name."""
        return phases.phasenames[self.phase()]

    def mutable(self):
        # anything that is not public may still be rewritten
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        # matcher for a fileset expression evaluated against this context
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities
254 254
255 255 def parents(self):
256 256 """return contexts for each parent changeset"""
257 257 return self._parents
258 258
259 259 def p1(self):
260 260 return self._parents[0]
261 261
262 262 def p2(self):
263 263 parents = self._parents
264 264 if len(parents) == 2:
265 265 return parents[1]
266 266 return self._repo[nullrev]
267 267
    def _fileinfo(self, path):
        """Return ``(filenode, flags)`` for ``path`` in this context.

        Raises ManifestLookupError when the path is not in the manifest.
        """
        if '_manifest' in self.__dict__:
            # full manifest already loaded: look the file up directly
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            # file was touched in this changeset, so the (cheaper) manifest
            # delta may already contain it
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        """Return the file node of ``path`` in this context."""
        return self._fileinfo(path)[0]

    def flags(self, path):
        """Return the flags of ``path``, or b'' when the file is absent."""
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''
300 300
    @propertycache
    def _copies(self):
        # (p1copies, p2copies) pair computed from copy metadata
        return copies.computechangesetcopies(self)

    def p1copies(self):
        """Copies against the first parent (destination -> source)."""
        return self._copies[0]

    def p2copies(self):
        """Copies against the second parent (destination -> source)."""
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        # null subrepo, used when the subrepo is absent in this context
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)
323 323
    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """Build a matcher for the given patterns, evaluated relative to
        ``cwd`` (defaulting to the repo's current working directory)."""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            # default to diffing against the first parent
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )
380 380
381 381 def dirs(self):
382 382 return self._manifest.dirs()
383 383
384 384 def hasdir(self, dir):
385 385 return self._manifest.hasdir(dir)
386 386
    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is to
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    # prefix subrepo results with the subrepo path
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r
477 477
478 478 def mergestate(self, clean=False):
479 479 """Get a mergestate object for this context."""
480 480 raise NotImplementedError(
481 481 '%s does not implement mergestate()' % self.__class__
482 482 )
483 483
484 484
485 485 class changectx(basectx):
486 486 """A changecontext object makes access to data related to a particular
487 487 changeset convenient. It represents a read-only context already present in
488 488 the repo."""
489 489
    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            # no revision assigned yet: fall back to object identity
            return id(self)

    def __nonzero__(self):
        # the null revision is falsy
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # parsed changelog entry for this revision
        if self._maybe_filtered:
            repo = self._repo
        else:
            # already known unfiltered: skip the filtered-changelog overhead
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # delta against the parent manifest; cheaper than a full read
        return self._manifestctx.readdelta()
532 532
533 533 @propertycache
534 534 def _parents(self):
535 535 repo = self._repo
536 536 if self._maybe_filtered:
537 537 cl = repo.changelog
538 538 else:
539 539 cl = repo.unfiltered().changelog
540 540
541 541 p1, p2 = cl.parentrevs(self._rev)
542 542 if p2 == nullrev:
543 543 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
544 544 return [
545 545 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
546 546 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
547 547 ]
548 548
    def changeset(self):
        """Return the raw changeset data as a 6-tuple:
        (manifest node, user, date, files, description, extra)."""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        """Return the manifest node recorded by this changeset."""
        return self._changeset.manifest

    def user(self):
        """Return the committer recorded by this changeset."""
        return self._changeset.user

    def date(self):
        """Return the commit date."""
        return self._changeset.date

    def files(self):
        """Return the list of files touched by this changeset."""
        return self._changeset.files

    def filesmodified(self):
        """Files touched by this changeset that were neither added nor
        removed, i.e. modified in place."""
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)
577 577
578 578 def filesadded(self):
579 579 filesadded = self._changeset.filesadded
580 580 compute_on_none = True
581 581 if self._repo.filecopiesmode == b'changeset-sidedata':
582 582 compute_on_none = False
583 583 else:
584 584 source = self._repo.ui.config(b'experimental', b'copies.read-from')
585 585 if source == b'changeset-only':
586 586 compute_on_none = False
587 587 elif source != b'compatibility':
588 588 # filelog mode, ignore any changelog content
589 589 filesadded = None
590 590 if filesadded is None:
591 591 if compute_on_none:
592 592 filesadded = copies.computechangesetfilesadded(self)
593 593 else:
594 594 filesadded = []
595 595 return filesadded
596 596
597 597 def filesremoved(self):
598 598 filesremoved = self._changeset.filesremoved
599 599 compute_on_none = True
600 600 if self._repo.filecopiesmode == b'changeset-sidedata':
601 601 compute_on_none = False
602 602 else:
603 603 source = self._repo.ui.config(b'experimental', b'copies.read-from')
604 604 if source == b'changeset-only':
605 605 compute_on_none = False
606 606 elif source != b'compatibility':
607 607 # filelog mode, ignore any changelog content
608 608 filesremoved = None
609 609 if filesremoved is None:
610 610 if compute_on_none:
611 611 filesremoved = copies.computechangesetfilesremoved(self)
612 612 else:
613 613 filesremoved = []
614 614 return filesremoved
615 615
    @propertycache
    def _copies(self):
        # copy metadata possibly stored directly in the changeset
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                # fall back to the filelog-based computation in basectx
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
650 650
    def description(self):
        """Return the commit message."""
        return self._changeset.description

    def branch(self):
        """Return the branch name, converted to local encoding."""
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        """True if this changeset closes its branch."""
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        """Return the phase value of this changeset."""
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        # True when the revision is filtered out of the 'visible' view
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        # committed changesets are never in-memory only
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        # lazily yield a context for every ancestor revision
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
709 709
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # working context: use its first parent instead
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: let the revlog decide
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]
754 754
    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        # alias of walk(), kept for API symmetry with other context types
        return self.walk(match)
775 775
776 776
777 777 class basefilectx(object):
778 778 """A filecontext object represents the common logic for its children:
779 779 filectx: read-only access to a filerevision that is already present
780 780 in the repo,
781 781 workingfilectx: a filecontext that represents files from the working
782 782 directory,
783 783 memfilectx: a filecontext that represents files in-memory,
784 784 """
785 785
    @propertycache
    def _filelog(self):
        # the filelog (revlog) backing this file's history
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # changelog revision this file context belongs to
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # changeset cannot be resolved; still show the path
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # equal when same concrete type, same path and same file node
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
855 855
    # Most of the accessors below delegate to the owning changectx.
    def filerev(self):
        return self._filerev

    def filenode(self):
        return self._filenode

    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)

    def flags(self):
        """Return the flags of this file (b'x', b'l' or b'')."""
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        """Changelog revision this file context belongs to."""
        return self._changeid

    def linkrev(self):
        # raw linkrev recorded in the filelog; may be shadowed — see
        # introrev() for the adjusted value
        return self._filelog.linkrev(self._filerev)

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        # copy information, or a falsy value when the file was not copied
        return self._copied

    def copysource(self):
        # path this file was copied from, or a falsy value if not copied
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo
928 928
929 929 def size(self):
930 930 return len(self.data())
931 931
932 932 def path(self):
933 933 return self._path
934 934
935 935 def isbinary(self):
936 936 try:
937 937 return stringutil.binary(self.data())
938 938 except IOError:
939 939 return False
940 940
941 941 def isexec(self):
942 942 return b'x' in self.flags()
943 943
944 944 def islink(self):
945 945 return b'l' in self.flags()
946 946
947 947 def isabsent(self):
948 948 """whether this filectx represents a file not in self._changectx
949 949
950 950 This is mainly for merge code to detect change/delete conflicts. This is
951 951 expected to be True for all subclasses of basectx."""
952 952 return False
953 953
    # subclasses may set this to have cmp() delegated to the other side
    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            # only contexts backed by a revlog have a filenode; in-memory
            # variants must override cmp() themselves
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
984 984
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will return "None" and stop its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the search starts at the linkrev itself
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
        # In theory, we should never get out of that loop without a result.
        # But if manifest uses a buggy file revision (not children of the
        # one it replaces) we could. Such a buggy situation will likely
        # result in a crash somewhere else at some point.
        return lkr
1037 1037
1038 1038 def isintroducedafter(self, changelogrev):
1039 1039 """True if a filectx has been introduced after a given floor revision
1040 1040 """
1041 1041 if self.linkrev() >= changelogrev:
1042 1042 return True
1043 1043 introrev = self._introrev(stoprev=changelogrev)
1044 1044 if introrev is None:
1045 1045 return False
1046 1046 return introrev >= changelogrev
1047 1047
1048 1048 def introrev(self):
1049 1049 """return the rev of the changeset which introduced this file revision
1050 1050
1051 1051 This method is different from linkrev because it take into account the
1052 1052 changeset the filectx was created from. It ensures the returned
1053 1053 revision is one of its ancestors. This prevents bugs from
1054 1054 'linkrev-shadowing' when a file revision is used by multiple
1055 1055 changesets.
1056 1056 """
1057 1057 return self._introrev()
1058 1058
1059 1059 def _introrev(self, stoprev=None):
1060 1060 """
1061 1061 Same as `introrev` but, with an extra argument to limit changelog
1062 1062 iteration range in some internal usecase.
1063 1063
1064 1064 If `stoprev` is set, the `introrev` will not be searched past that
1065 1065 `stoprev` revision and "None" might be returned. This is useful to
1066 1066 limit the iteration range.
1067 1067 """
1068 1068 toprev = None
1069 1069 attrs = vars(self)
1070 1070 if '_changeid' in attrs:
1071 1071 # We have a cached value already
1072 1072 toprev = self._changeid
1073 1073 elif '_changectx' in attrs:
1074 1074 # We know which changelog entry we are coming from
1075 1075 toprev = self._changectx.rev()
1076 1076
1077 1077 if toprev is not None:
1078 1078 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1079 1079 elif '_descendantrev' in attrs:
1080 1080 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1081 1081 # be nice and cache the result of the computation
1082 1082 if introrev is not None:
1083 1083 self._changeid = introrev
1084 1084 return introrev
1085 1085 else:
1086 1086 return self.linkrev()
1087 1087
1088 1088 def introfilectx(self):
1089 1089 """Return filectx having identical contents, but pointing to the
1090 1090 changeset revision where this filectx was introduced"""
1091 1091 introrev = self.introrev()
1092 1092 if self.rev() == introrev:
1093 1093 return self
1094 1094 return self.filectx(self.filenode(), changeid=introrev)
1095 1095
1096 1096 def _parentfilectx(self, path, fileid, filelog):
1097 1097 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1098 1098 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1099 1099 if '_changeid' in vars(self) or '_changectx' in vars(self):
1100 1100 # If self is associated with a changeset (probably explicitly
1101 1101 # fed), ensure the created filectx is associated with a
1102 1102 # changeset that is an ancestor of self.changectx.
1103 1103 # This lets us later use _adjustlinkrev to get a correct link.
1104 1104 fctx._descendantrev = self.rev()
1105 1105 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1106 1106 elif '_descendantrev' in vars(self):
1107 1107 # Otherwise propagate _descendantrev if we have one associated.
1108 1108 fctx._descendantrev = self._descendantrev
1109 1109 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1110 1110 return fctx
1111 1111
1112 1112 def parents(self):
1113 1113 _path = self._path
1114 1114 fl = self._filelog
1115 1115 parents = self._filelog.parents(self._filenode)
1116 1116 pl = [(_path, node, fl) for node in parents if node != nullid]
1117 1117
1118 1118 r = fl.renamed(self._filenode)
1119 1119 if r:
1120 1120 # - In the simple rename case, both parent are nullid, pl is empty.
1121 1121 # - In case of merge, only one of the parent is null id and should
1122 1122 # be replaced with the rename information. This parent is -always-
1123 1123 # the first one.
1124 1124 #
1125 1125 # As null id have always been filtered out in the previous list
1126 1126 # comprehension, inserting to 0 will always result in "replacing
1127 1127 # first nullid parent with rename information.
1128 1128 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1129 1129
1130 1130 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1131 1131
1132 1132 def p1(self):
1133 1133 return self.parents()[0]
1134 1134
1135 1135 def p2(self):
1136 1136 p = self.parents()
1137 1137 if len(p) == 2:
1138 1138 return p[1]
1139 1139 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1140 1140
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        # memoize filelog lookups: annotate touches the same paths repeatedly
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            # NOTE(review): the bare attribute access below presumably forces
            # computation/caching of _changeid -- confirm against basefilectx.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )
1190 1190
1191 1191 def ancestors(self, followfirst=False):
1192 1192 visit = {}
1193 1193 c = self
1194 1194 if followfirst:
1195 1195 cut = 1
1196 1196 else:
1197 1197 cut = None
1198 1198
1199 1199 while True:
1200 1200 for parent in c.parents()[:cut]:
1201 1201 visit[(parent.linkrev(), parent.filenode())] = parent
1202 1202 if not visit:
1203 1203 break
1204 1204 c = visit.pop(max(visit))
1205 1205 yield c
1206 1206
1207 1207 def decodeddata(self):
1208 1208 """Returns `data()` after running repository decoding filters.
1209 1209
1210 1210 This is often equivalent to how the data would be expressed on disk.
1211 1211 """
1212 1212 return self._repo.wwritedata(self.path(), self.data())
1213 1213
1214 1214
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of pinning the file revision must be provided
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog

        # pre-seed the corresponding propertycaches with whatever was given
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Return the raw revlog data for this file revision."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy.

        Raises Abort on a censored node unless censor.policy is "ignore",
        in which case empty content is returned instead.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        # size as recorded by the filelog (may include metadata overhead,
        # see basefilectx.cmp above)
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        # if either parent already has this exact file revision, the rename
        # does not belong to this changeset
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1338 1338
1339 1339
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        # not committed yet, so no revision number or node
        self._rev = None
        self._node = None
        self._text = text
        # only seed the propertycaches when explicit values were given;
        # otherwise the @propertycache fallbacks below compute defaults
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        # first parent's short hash plus a "+" marker for uncommitted state
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # default when no explicit `changes` was passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        # default when no explicit `user` was passed to __init__
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # default when no explicit `date` was passed to __init__; the
        # devel.default-date config takes precedence over "now"
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        # all touched files, sorted
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # aliases exposing the same status lists under the files* names
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # at least the configured new-commit phase, never lower than a parent
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer an already-computed manifest if one is cached on the
        # instance; note this branch always returns (flags or b'')
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def ancestors(self):
        # yield the parents themselves first, then their ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1504 1504
1505 1505
1506 1506 class workingctx(committablectx):
1507 1507 """A workingctx object makes access to data related to
1508 1508 the current working directory convenient.
1509 1509 date - any valid date string or (unixtime, offset), or None.
1510 1510 user - username string, or None.
1511 1511 extra - a dictionary of extra values, or None.
1512 1512 changes - a list of file lists as returned by localrepo.status()
1513 1513 or None to use the repository status.
1514 1514 """
1515 1515
1516 1516 def __init__(
1517 1517 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1518 1518 ):
1519 1519 branch = None
1520 1520 if not extra or b'branch' not in extra:
1521 1521 try:
1522 1522 branch = repo.dirstate.branch()
1523 1523 except UnicodeDecodeError:
1524 1524 raise error.Abort(_(b'branch name not in UTF-8!'))
1525 1525 super(workingctx, self).__init__(
1526 1526 repo, text, user, date, extra, changes, branch=branch
1527 1527 )
1528 1528
1529 1529 def __iter__(self):
1530 1530 d = self._repo.dirstate
1531 1531 for f in d:
1532 1532 if d[f] != b'r':
1533 1533 yield f
1534 1534
1535 1535 def __contains__(self, key):
1536 1536 return self._repo.dirstate[key] not in b"?r"
1537 1537
    def hex(self):
        # the working directory is identified by the fixed wdir pseudo-hash
        return wdirhex
1540 1540
    @propertycache
    def _parents(self):
        """The changectx parents of the working directory, with a null
        second parent omitted."""
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [
            changectx(
                self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
            )
            for n in p
        ]
1554 1554
    def setparents(self, p1node, p2node=nullid):
        """Set the working directory parents and fix up copy records."""
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == nullid:
                # with no second parent, clear copy records whose source and
                # destination are both absent from the new first parent
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)
1571 1571
    def _fileinfo(self, path):
        """Look up `path` after forcing the manifest cache to be built."""
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1576 1576
1577 1577 def _buildflagfunc(self):
1578 1578 # Create a fallback function for getting file flags when the
1579 1579 # filesystem doesn't support them
1580 1580
1581 1581 copiesget = self._repo.dirstate.copies().get
1582 1582 parents = self.parents()
1583 1583 if len(parents) < 2:
1584 1584 # when we have one parent, it's easy: copy from parent
1585 1585 man = parents[0].manifest()
1586 1586
1587 1587 def func(f):
1588 1588 f = copiesget(f, f)
1589 1589 return man.flags(f)
1590 1590
1591 1591 else:
1592 1592 # merges are tricky: we try to reconstruct the unstored
1593 1593 # result from the merge (issue1802)
1594 1594 p1, p2 = parents
1595 1595 pa = p1.ancestor(p2)
1596 1596 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1597 1597
1598 1598 def func(f):
1599 1599 f = copiesget(f, f) # may be wrong for merges with copies
1600 1600 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1601 1601 if fl1 == fl2:
1602 1602 return fl1
1603 1603 if fl1 == fla:
1604 1604 return fl2
1605 1605 if fl2 == fla:
1606 1606 return fl1
1607 1607 return b'' # punt for conflicts
1608 1608
1609 1609 return func
1610 1610
    @propertycache
    def _flagfunc(self):
        # dirstate flag lookup, with _buildflagfunc as the fallback when the
        # filesystem cannot provide flags
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1614 1614
1615 1615 def flags(self, path):
1616 1616 try:
1617 1617 return self._flagfunc(path)
1618 1618 except OSError:
1619 1619 return b''
1620 1620
1621 1621 def filectx(self, path, filelog=None):
1622 1622 """get a file context from the working directory"""
1623 1623 return workingfilectx(
1624 1624 self._repo, path, workingctx=self, filelog=filelog
1625 1625 )
1626 1626
1627 1627 def dirty(self, missing=False, merge=True, branch=True):
1628 1628 """check whether a working directory is modified"""
1629 1629 # check subrepos first
1630 1630 for s in sorted(self.substate):
1631 1631 if self.sub(s).dirty(missing=missing):
1632 1632 return True
1633 1633 # check current working dir
1634 1634 return (
1635 1635 (merge and self.p2())
1636 1636 or (branch and self.branch() != self.p1().branch())
1637 1637 or self.modified()
1638 1638 or self.added()
1639 1639 or self.removed()
1640 1640 or (missing and self.deleted())
1641 1641 )
1642 1642
1643 1643 def add(self, list, prefix=b""):
1644 1644 with self._repo.wlock():
1645 1645 ui, ds = self._repo.ui, self._repo.dirstate
1646 1646 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1647 1647 rejected = []
1648 1648 lstat = self._repo.wvfs.lstat
1649 1649 for f in list:
1650 1650 # ds.pathto() returns an absolute file when this is invoked from
1651 1651 # the keyword extension. That gets flagged as non-portable on
1652 1652 # Windows, since it contains the drive letter and colon.
1653 1653 scmutil.checkportable(ui, os.path.join(prefix, f))
1654 1654 try:
1655 1655 st = lstat(f)
1656 1656 except OSError:
1657 1657 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1658 1658 rejected.append(f)
1659 1659 continue
1660 1660 limit = ui.configbytes(b'ui', b'large-file-limit')
1661 1661 if limit != 0 and st.st_size > limit:
1662 1662 ui.warn(
1663 1663 _(
1664 1664 b"%s: up to %d MB of RAM may be required "
1665 1665 b"to manage this file\n"
1666 1666 b"(use 'hg revert %s' to cancel the "
1667 1667 b"pending addition)\n"
1668 1668 )
1669 1669 % (f, 3 * st.st_size // 1000000, uipath(f))
1670 1670 )
1671 1671 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1672 1672 ui.warn(
1673 1673 _(
1674 1674 b"%s not added: only files and symlinks "
1675 1675 b"supported currently\n"
1676 1676 )
1677 1677 % uipath(f)
1678 1678 )
1679 1679 rejected.append(f)
1680 1680 elif ds[f] in b'amn':
1681 1681 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1682 1682 elif ds[f] == b'r':
1683 1683 ds.normallookup(f)
1684 1684 else:
1685 1685 ds.add(f)
1686 1686 return rejected
1687 1687
1688 1688 def forget(self, files, prefix=b""):
1689 1689 with self._repo.wlock():
1690 1690 ds = self._repo.dirstate
1691 1691 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1692 1692 rejected = []
1693 1693 for f in files:
1694 1694 if f not in ds:
1695 1695 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1696 1696 rejected.append(f)
1697 1697 elif ds[f] != b'a':
1698 1698 ds.remove(f)
1699 1699 else:
1700 1700 ds.drop(f)
1701 1701 return rejected
1702 1702
    def copy(self, source, dest):
        """Record that `dest` was copied from `source` in the dirstate.

        Warns (and does nothing) when `dest` is missing or is not a regular
        file or symlink.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # only "file not found" is tolerated; anything else propagates
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in b'?':
                    # untracked destination: start tracking it
                    ds.add(dest)
                elif ds[dest] in b'r':
                    # previously removed: resurrect it
                    ds.normallookup(dest)
                ds.copy(source, dest)
1726 1726
1727 1727 def match(
1728 1728 self,
1729 1729 pats=None,
1730 1730 include=None,
1731 1731 exclude=None,
1732 1732 default=b'glob',
1733 1733 listsubrepos=False,
1734 1734 badfn=None,
1735 1735 cwd=None,
1736 1736 ):
1737 1737 r = self._repo
1738 1738 if not cwd:
1739 1739 cwd = r.getcwd()
1740 1740
1741 1741 # Only a case insensitive filesystem needs magic to translate user input
1742 1742 # to actual case in the filesystem.
1743 1743 icasefs = not util.fscasesensitive(r.root)
1744 1744 return matchmod.match(
1745 1745 r.root,
1746 1746 cwd,
1747 1747 pats,
1748 1748 include,
1749 1749 exclude,
1750 1750 default,
1751 1751 auditor=r.auditor,
1752 1752 ctx=self,
1753 1753 listsubrepos=listsubrepos,
1754 1754 badfn=badfn,
1755 1755 icasefs=icasefs,
1756 1756 )
1757 1757
1758 1758 def _filtersuspectsymlink(self, files):
1759 1759 if not files or self._repo.dirstate._checklink:
1760 1760 return files
1761 1761
1762 1762 # Symlink placeholders may get non-symlink-like contents
1763 1763 # via user error or dereferencing by NFS or Samba servers,
1764 1764 # so we filter out any placeholders that don't look like a
1765 1765 # symlink
1766 1766 sane = []
1767 1767 for f in files:
1768 1768 if self.flags(f) == b'l':
1769 1769 d = self[f].data()
1770 1770 if (
1771 1771 d == b''
1772 1772 or len(d) >= 1024
1773 1773 or b'\n' in d
1774 1774 or stringutil.binary(d)
1775 1775 ):
1776 1776 self._repo.ui.debug(
1777 1777 b'ignoring suspect symlink placeholder "%s"\n' % f
1778 1778 )
1779 1779 continue
1780 1780 sane.append(f)
1781 1781 return sane
1782 1782
    def _checklookup(self, files):
        """Re-examine files the dirstate flagged as possibly clean.

        Returns a (modified, deleted, fixup) triple, where `fixup` lists
        files that turned out to be clean after all.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1815 1815
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                # identity lets us detect a concurrent dirstate rewrite below
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            # run registered post-status callbacks
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # best effort: the fixup is an optimization, not a requirement
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1856 1856
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # `cmp` holds files the dirstate could not classify without comparing
        # content
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        # opportunistically mark the confirmed-clean files as such in the
        # dirstate
        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1889 1889
1890 1890 @propertycache
1891 1891 def _copies(self):
1892 1892 p1copies = {}
1893 1893 p2copies = {}
1894 1894 parents = self._repo.dirstate.parents()
1895 1895 p1manifest = self._repo[parents[0]].manifest()
1896 1896 p2manifest = self._repo[parents[1]].manifest()
1897 1897 changedset = set(self.added()) | set(self.modified())
1898 1898 narrowmatch = self._repo.narrowmatch()
1899 1899 for dst, src in self._repo.dirstate.copies().items():
1900 1900 if dst not in changedset or not narrowmatch(dst):
1901 1901 continue
1902 1902 if src in p1manifest:
1903 1903 p1copies[dst] = src
1904 1904 elif src in p2manifest:
1905 1905 p2copies[dst] = src
1906 1906 return p1copies, p2copies
1907 1907
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1918 1918
1919 1919 def _buildstatusmanifest(self, status):
1920 1920 """Builds a manifest that includes the given status results."""
1921 1921 parents = self.parents()
1922 1922
1923 1923 man = parents[0].manifest().copy()
1924 1924
1925 1925 ff = self._flagfunc
1926 1926 for i, l in (
1927 1927 (addednodeid, status.added),
1928 1928 (modifiednodeid, status.modified),
1929 1929 ):
1930 1930 for f in l:
1931 1931 man[f] = i
1932 1932 try:
1933 1933 man.setflag(f, ff(f))
1934 1934 except OSError:
1935 1935 pass
1936 1936
1937 1937 for f in status.deleted + status.removed:
1938 1938 if f in man:
1939 1939 del man[f]
1940 1940
1941 1941 return man
1942 1942
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # NOTE: the incoming ``s`` is deliberately discarded; status is
        # recomputed from the dirstate here.
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # Not comparing against the dirstate parent: fall back to the
            # generic manifest-based comparison in the superclass.
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
1963 1963
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            # NOTE: mutates the caller's matcher in place rather than
            # returning a modified copy.
            match.bad = bad
        return match
1986 1986
1987 1987 def walk(self, match):
1988 1988 '''Generates matching file names.'''
1989 1989 return sorted(
1990 1990 self._repo.dirstate.walk(
1991 1991 self._repo.narrowmatch(match),
1992 1992 subrepos=sorted(self.substate),
1993 1993 unknown=True,
1994 1994 ignored=False,
1995 1995 )
1996 1996 )
1997 1997
1998 1998 def matches(self, match):
1999 1999 match = self._repo.narrowmatch(match)
2000 2000 ds = self._repo.dirstate
2001 2001 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2002 2002
    def markcommitted(self, node):
        """Update the dirstate to reflect that ``node`` was committed."""
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
        # The quick-access changeid cache may still point at pre-commit
        # parents; drop it.
        self._repo._quick_access_changeid_invalidate()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
2018 2018
2019 2019 def mergestate(self, clean=False):
2020 2020 if clean:
2021 2021 return mergestatemod.mergestate.clean(self._repo)
2022 2022 return mergestatemod.mergestate.read(self._repo)
2023 2023
2024 2024
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # Only pin filelog/changectx when supplied; subclasses compute
        # them lazily (propertycache) otherwise.
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # A committable file context always "exists".
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if this file was copied."""
        source = self.copysource()
        if not source:
            return None
        basemanifest = self._changectx._parents[0]._manifest
        return source, basemanifest.get(source, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def nodefor(ctx, fname):
            return ctx._manifest.get(fname, nullid)

        fname = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        # The first entry follows a rename when there is one (the copy
        # source in p1); otherwise it is this path in p1.
        if renamed:
            entries = [renamed + (None,)]
        else:
            entries = [(fname, nodefor(parentctxs[0], fname), filelog)]
        entries.extend(
            (fname, nodefor(pctx, fname), filelog) for pctx in parentctxs[1:]
        )

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in entries
            if n != nullid
        ]

    def children(self):
        # Uncommitted contexts have no committed descendants.
        return []
2082 2082
2083 2083
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # Lazily bind to a fresh working-directory context.
        return workingctx(self._repo)

    def data(self):
        """Read this file's current content from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the copy source recorded in the dirstate, if any."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return (mtime, tz); fall back to the changectx date if the file
        is missing on disk."""
        ctxtime, tzoffset = self._changectx.date()
        try:
            mtime = self._repo.wvfs.lstat(self._path)[stat.ST_MTIME]
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (ctxtime, tzoffset)
        return (mtime, tzoffset)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        fname = self._path
        wvfs.audit(fname)
        checkconflicts = self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        )
        if checkconflicts:
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(fname) and not wvfs.islink(fname):
                wvfs.rmtree(fname, forcibly=True)
            # Unlink the first conflicting ancestor file, innermost first.
            for prefix in reversed(list(pathutil.finddirs(fname))):
                if wvfs.isfileorlink(prefix):
                    wvfs.unlink(prefix)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(fname) and not wvfs.islink(fname):
                wvfs.removedirs(fname)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
2176 2176
2177 2177
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        """Set the context this overlay applies its cached writes on top of."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=nullid):
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        """Return file data from the cache, falling back to the wrapped ctx."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # Only valid for paths present in the cache (dirty files).
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # Dirty, still exists, and existed in the parent: modified.
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # Dirty, exists now, but did not exist in the parent: added.
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # Dirty, gone now, but existed in the parent: removed.
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                # Use ``path`` (the argument): a ctx object, unlike a
                # filectx, has no ``_path`` attribute.
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                # Use ``path`` (the argument): a ctx object has no
                # ``_path`` attribute.
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        """Build a memctx that amends ``precursor`` with the cached changes."""
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [self.p1().rev()],
            scmutil.matchfiles(self.repo(), self._cache.keys()),
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )
2588 2588
2589 2589
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        # ``parent`` is the owning overlayworkingctx; every accessor below
        # delegates to it with this file's path.
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # Contents differ -> contexts differ.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # Symlink following is handled by the parent context's exists().
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # No filesystem path to audit for in-memory files.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # backgroundclose is accepted for interface compatibility but has
        # no meaning for in-memory writes.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        # ignoremissing is accepted for interface compatibility only.
        return self._parent.remove(self._path)

    def clearunknown(self):
        # Nothing on disk can conflict with an in-memory write.
        pass
2644 2644
2645 2645
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            changed = self._changedset
            cleanfiles = [f for f in self._manifest if f not in changed]
        else:
            cleanfiles = []
        status = self._status
        return scmutil.status(
            [f for f in status.modified if match(f)],
            [f for f in status.added if match(f)],
            [f for f in status.removed if match(f)],
            [],
            [],
            [],
            cleanfiles,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        changed = set(self._status.modified)
        changed.update(self._status.added, self._status.removed)
        return changed
2689 2689
2690 2690
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: the common case after warm-up is a cache hit.
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
2706 2706
2707 2707
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        # Materialize an in-memory copy of the file as it exists in ``ctx``.
        fctx = ctx[path]
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=fctx.copysource(),
        )

    return getfilectx
2729 2729
2730 2730
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # Deleted file: memctx expects None here.
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2753 2753
2754 2754
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        self._rev = None
        self._node = None
        # Missing parents (None) are normalized to nullid, so _parents is
        # always a 2-element list.
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # Normalize filectxfn: a patch.filestore or a plain context may be
        # passed instead of a callable.
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            # The editor may rewrite the commit message; save it for
            # recovery (hg commit --logfile .hg/last-message.txt).
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: modified relative to
                # the parent(s).
                modified.append(f)
            else:
                # filectxfn returned None: file was removed.
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2882 2882
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if current file was copied in
        the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # a symlink wins over the executable bit when both are set
        if islink:
            self._flags = b'l'
        else:
            self._flags = b'x' if isexec else b''
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        # True when the contents differ (opposite of filecmp's convention)
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2933 2933
2934 2934
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # not committed yet, so no revision number / node id
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        # the whole point of this class: reuse the original manifest
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # pad with the null changeset so _parents always has length two
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): p1/p2 are changectx objects compared against the
        # nullid *bytes*, so "p1 != nullid" looks always true and the guard
        # never skips the manifest-parent check; presumably
        # "p1.node() != nullid" was intended -- confirm before changing.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            # let the editor callback produce/confirm the commit message
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # node id of the manifest reused from the original changeset
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file contents are unchanged; delegate to the original context
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3043 3043
3044 3044
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True when this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not; only take the
        # fast path when neither side is a symlink.
        has_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        if self._repo and not has_symlink and isinstance(fctx, workingfilectx):
            # Both sides are disk-backed: compare the files directly. Note
            # that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different), hence the "not".
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no link/exec flags
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now