##// END OF EJS Templates
context: some gratuitous documentation improvement...
marmoute -
r52655:dcbe7fda default
parent child Browse files
Show More
@@ -1,3145 +1,3148 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import filecmp
10 10 import os
11 11 import stat
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullrev,
17 17 short,
18 18 )
19 19 from . import (
20 20 dagop,
21 21 encoding,
22 22 error,
23 23 fileset,
24 24 match as matchmod,
25 25 mergestate as mergestatemod,
26 26 metadata,
27 27 obsolete as obsmod,
28 28 patch,
29 29 pathutil,
30 30 phases,
31 31 repoview,
32 32 scmutil,
33 33 sparse,
34 34 subrepo,
35 35 subrepoutil,
36 36 testing,
37 37 util,
38 38 )
39 39 from .utils import (
40 40 dateutil,
41 41 stringutil,
42 42 )
43 43 from .dirstateutils import (
44 44 timestamp,
45 45 )
46 46
47 47 propertycache = util.propertycache
48 48
49 49
50 50 class basectx:
51 51 """A basectx object represents the common logic for its children:
52 52 changectx: read-only context that is already present in the repo,
53 53 workingctx: a context that represents the working directory and can
54 54 be committed,
55 55 memctx: a context that represents changes in-memory and can also
56 56 be committed."""
57 57
58 58 def __init__(self, repo):
59 59 self._repo = repo
60 60
61 61 def __bytes__(self):
62 62 return short(self.node())
63 63
64 64 __str__ = encoding.strmethod(__bytes__)
65 65
66 66 def __repr__(self):
67 67 return "<%s %s>" % (type(self).__name__, str(self))
68 68
69 69 def __eq__(self, other):
70 70 try:
71 71 return type(self) == type(other) and self._rev == other._rev
72 72 except AttributeError:
73 73 return False
74 74
75 75 def __ne__(self, other):
76 76 return not (self == other)
77 77
78 78 def __contains__(self, key):
79 79 return key in self._manifest
80 80
81 81 def __getitem__(self, key):
82 82 return self.filectx(key)
83 83
84 84 def __iter__(self):
85 85 return iter(self._manifest)
86 86
87 87 def _buildstatusmanifest(self, status):
88 88 """Builds a manifest that includes the given status results, if this is
89 89 a working copy context. For non-working copy contexts, it just returns
90 90 the normal manifest."""
91 91 return self.manifest()
92 92
93 93 def _matchstatus(self, other, match):
94 94 """This internal method provides a way for child objects to override the
95 95 match operator.
96 96 """
97 97 return match
98 98
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        ``s`` is a status object whose deleted/unknown/ignored fields are
        taken as-is; this method fills in modified/added/removed/clean by
        diffing the two contexts' manifests.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.items():
            # files already classified as deleted keep that classification
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
164 164
165 165 @propertycache
166 166 def substate(self):
167 167 return subrepoutil.state(self, self._repo.ui)
168 168
169 169 def subrev(self, subpath):
170 170 return self.substate[subpath][1]
171 171
    def rev(self):
        """Return the revision number of this context."""
        return self._rev

    def node(self):
        """Return the binary node id of this context."""
        return self._node

    def hex(self):
        """Return the hex-encoded node id."""
        return hex(self.node())

    def manifest(self):
        """Return the manifest for this context."""
        return self._manifest

    def manifestctx(self):
        """Return the manifest context for this context."""
        return self._manifestctx

    def repo(self):
        """Return the repository this context belongs to."""
        return self._repo

    def phasestr(self):
        """Return the phase as a human-readable name."""
        return phases.phasenames[self.phase()]

    def mutable(self):
        """True when the changeset is in a non-public phase."""
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        """Return a matcher for the fileset expression ``expr``."""
        return fileset.match(self, cwd, expr, badfn=badfn)
198 198
199 199 def obsolete(self):
200 200 """True if the changeset is obsolete"""
201 201 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
202 202
203 203 def extinct(self):
204 204 """True if the changeset is extinct"""
205 205 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
206 206
207 207 def orphan(self):
208 208 """True if the changeset is not obsolete, but its ancestor is"""
209 209 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
210 210
211 211 def phasedivergent(self):
212 212 """True if the changeset tries to be a successor of a public changeset
213 213
214 214 Only non-public and non-obsolete changesets may be phase-divergent.
215 215 """
216 216 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
217 217
218 218 def contentdivergent(self):
219 219 """Is a successor of a changeset with multiple possible successor sets
220 220
221 221 Only non-public and non-obsolete changesets may be content-divergent.
222 222 """
223 223 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
224 224
225 225 def isunstable(self):
226 226 """True if the changeset is either orphan, phase-divergent or
227 227 content-divergent"""
228 228 return self.orphan() or self.phasedivergent() or self.contentdivergent()
229 229
230 230 def instabilities(self):
231 231 """return the list of instabilities affecting this changeset.
232 232
233 233 Instabilities are returned as strings. possible values are:
234 234 - orphan,
235 235 - phase-divergent,
236 236 - content-divergent.
237 237 """
238 238 instabilities = []
239 239 if self.orphan():
240 240 instabilities.append(b'orphan')
241 241 if self.phasedivergent():
242 242 instabilities.append(b'phase-divergent')
243 243 if self.contentdivergent():
244 244 instabilities.append(b'content-divergent')
245 245 return instabilities
246 246
247 247 def parents(self):
248 248 """return contexts for each parent changeset"""
249 249 return self._parents
250 250
251 251 def p1(self):
252 252 return self._parents[0]
253 253
254 254 def p2(self):
255 255 parents = self._parents
256 256 if len(parents) == 2:
257 257 return parents[1]
258 258 return self._repo[nullrev]
259 259
    def _fileinfo(self, path):
        """Return the ``(filenode, flags)`` pair for ``path``.

        Raises ManifestLookupError when the file is not present in this
        changeset's manifest.
        """
        if '_manifest' in self.__dict__:
            # the full manifest is already loaded: answer from it directly
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node or b'None', path, _(b'not found in manifest')
                )
        # Try to find the file in the manifest delta, which can be faster to
        # read than a full manifest. If we fail to find the file there, it
        # might still exist in the full manifest, so look for it there.
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node or b'None', path, _(b'not found in manifest')
            )

        return node, flag
283 286
284 287 def filenode(self, path):
285 288 return self._fileinfo(path)[0]
286 289
287 290 def flags(self, path):
288 291 try:
289 292 return self._fileinfo(path)[1]
290 293 except error.LookupError:
291 294 return b''
292 295
293 296 @propertycache
294 297 def _copies(self):
295 298 return metadata.computechangesetcopies(self)
296 299
297 300 def p1copies(self):
298 301 return self._copies[0]
299 302
300 303 def p2copies(self):
301 304 return self._copies[1]
302 305
303 306 def sub(self, path, allowcreate=True):
304 307 '''return a subrepo for the stored revision of path, never wdir()'''
305 308 return subrepo.subrepo(self, path, allowcreate=allowcreate)
306 309
307 310 def nullsub(self, path, pctx):
308 311 return subrepo.nullsubrepo(self, path, pctx)
309 312
310 313 def workingsub(self, path):
311 314 """return a subrepo for the stored revision, or wdir if this is a wdir
312 315 context.
313 316 """
314 317 return subrepo.subrepo(self, path, allowwdir=True)
315 318
316 319 def match(
317 320 self,
318 321 pats=None,
319 322 include=None,
320 323 exclude=None,
321 324 default=b'glob',
322 325 listsubrepos=False,
323 326 badfn=None,
324 327 cwd=None,
325 328 ):
326 329 r = self._repo
327 330 if not cwd:
328 331 cwd = r.getcwd()
329 332 return matchmod.match(
330 333 r.root,
331 334 cwd,
332 335 pats,
333 336 include,
334 337 exclude,
335 338 default,
336 339 auditor=r.nofsauditor,
337 340 ctx=self,
338 341 listsubrepos=listsubrepos,
339 342 badfn=badfn,
340 343 )
341 344
342 345 def diff(
343 346 self,
344 347 ctx2=None,
345 348 match=None,
346 349 changes=None,
347 350 opts=None,
348 351 losedatafn=None,
349 352 pathfn=None,
350 353 copy=None,
351 354 copysourcematch=None,
352 355 hunksfilterfn=None,
353 356 ):
354 357 """Returns a diff generator for the given contexts and matcher"""
355 358 if ctx2 is None:
356 359 ctx2 = self.p1()
357 360 if ctx2 is not None:
358 361 ctx2 = self._repo[ctx2]
359 362 return patch.diff(
360 363 self._repo,
361 364 ctx2,
362 365 self,
363 366 match=match,
364 367 changes=changes,
365 368 opts=opts,
366 369 losedatafn=losedatafn,
367 370 pathfn=pathfn,
368 371 copy=copy,
369 372 copysourcematch=copysourcematch,
370 373 hunksfilterfn=hunksfilterfn,
371 374 )
372 375
373 376 def dirs(self):
374 377 return self._manifest.dirs()
375 378
376 379 def hasdir(self, dir):
377 380 return self._manifest.hasdir(dir)
378 381
    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        ctx1.status(ctx2) returns the status of change from ctx1 to ctx2

        Returns a mercurial.scmutils.status object.

        Data can be accessed using either tuple notation:

        (modified, added, removed, deleted, unknown, ignored, clean)

        or direct attribute access:

        s.modified, s.added, ...
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                # fold subrepo results into the parent's, prefixing paths
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        # sort each category so callers see deterministic ordering
        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r
479 482
480 483 def mergestate(self, clean=False):
481 484 """Get a mergestate object for this context."""
482 485 raise NotImplementedError(
483 486 '%s does not implement mergestate()' % self.__class__
484 487 )
485 488
486 489 def isempty(self):
487 490 return not (
488 491 len(self.parents()) > 1
489 492 or self.branch() != self.p1().branch()
490 493 or self.closesbranch()
491 494 or self.files()
492 495 )
493 496
494 497
495 498 class changectx(basectx):
496 499 """A changecontext object makes access to data related to a particular
497 500 changeset convenient. It represents a read-only context already present in
498 501 the repo."""
499 502
500 503 def __init__(self, repo, rev, node, maybe_filtered=True):
501 504 super(changectx, self).__init__(repo)
502 505 self._rev = rev
503 506 self._node = node
504 507 # When maybe_filtered is True, the revision might be affected by
505 508 # changelog filtering and operation through the filtered changelog must be used.
506 509 #
507 510 # When maybe_filtered is False, the revision has already been checked
508 511 # against filtering and is not filtered. Operation through the
509 512 # unfiltered changelog might be used in some case.
510 513 self._maybe_filtered = maybe_filtered
511 514
512 515 def __hash__(self):
513 516 try:
514 517 return hash(self._rev)
515 518 except AttributeError:
516 519 return id(self)
517 520
518 521 def __nonzero__(self):
519 522 return self._rev != nullrev
520 523
521 524 __bool__ = __nonzero__
522 525
523 526 @propertycache
524 527 def _changeset(self):
525 528 if self._maybe_filtered:
526 529 repo = self._repo
527 530 else:
528 531 repo = self._repo.unfiltered()
529 532 return repo.changelog.changelogrevision(self.rev())
530 533
531 534 @propertycache
532 535 def _manifest(self):
533 536 return self._manifestctx.read()
534 537
535 538 @property
536 539 def _manifestctx(self):
537 540 return self._repo.manifestlog[self._changeset.manifest]
538 541
539 542 @propertycache
540 543 def _manifestdelta(self):
541 544 return self._manifestctx.readdelta()
542 545
543 546 @propertycache
544 547 def _parents(self):
545 548 repo = self._repo
546 549 if self._maybe_filtered:
547 550 cl = repo.changelog
548 551 else:
549 552 cl = repo.unfiltered().changelog
550 553
551 554 p1, p2 = cl.parentrevs(self._rev)
552 555 if p2 == nullrev:
553 556 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
554 557 return [
555 558 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
556 559 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
557 560 ]
558 561
559 562 def changeset(self):
560 563 c = self._changeset
561 564 return (
562 565 c.manifest,
563 566 c.user,
564 567 c.date,
565 568 c.files,
566 569 c.description,
567 570 c.extra,
568 571 )
569 572
570 573 def manifestnode(self):
571 574 return self._changeset.manifest
572 575
573 576 def user(self):
574 577 return self._changeset.user
575 578
576 579 def date(self):
577 580 return self._changeset.date
578 581
579 582 def files(self):
580 583 return self._changeset.files
581 584
582 585 def filesmodified(self):
583 586 modified = set(self.files())
584 587 modified.difference_update(self.filesadded())
585 588 modified.difference_update(self.filesremoved())
586 589 return sorted(modified)
587 590
    def filesadded(self):
        """Return the list of files added by this changeset.

        The value recorded in the changelog entry is used when configuration
        allows it; otherwise it is recomputed from the repository, or
        reported as empty when recomputing is disabled.
        """
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            # sidedata mode: never fall back to recomputing
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = metadata.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded
606 609
607 610 def filesremoved(self):
608 611 filesremoved = self._changeset.filesremoved
609 612 compute_on_none = True
610 613 if self._repo.filecopiesmode == b'changeset-sidedata':
611 614 compute_on_none = False
612 615 else:
613 616 source = self._repo.ui.config(b'experimental', b'copies.read-from')
614 617 if source == b'changeset-only':
615 618 compute_on_none = False
616 619 elif source != b'compatibility':
617 620 # filelog mode, ignore any changelog content
618 621 filesremoved = None
619 622 if filesremoved is None:
620 623 if compute_on_none:
621 624 filesremoved = metadata.computechangesetfilesremoved(self)
622 625 else:
623 626 filesremoved = []
624 627 return filesremoved
625 628
    @propertycache
    def _copies(self):
        """Return the ``(p1copies, p2copies)`` dicts for this changeset."""
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
660 663
661 664 def description(self):
662 665 return self._changeset.description
663 666
664 667 def branch(self):
665 668 return encoding.tolocal(self._changeset.extra.get(b"branch"))
666 669
667 670 def closesbranch(self):
668 671 return b'close' in self._changeset.extra
669 672
670 673 def extra(self):
671 674 """Return a dict of extra information."""
672 675 return self._changeset.extra
673 676
674 677 def tags(self):
675 678 """Return a list of byte tag names"""
676 679 return self._repo.nodetags(self._node)
677 680
678 681 def bookmarks(self):
679 682 """Return a list of byte bookmark names."""
680 683 return self._repo.nodebookmarks(self._node)
681 684
682 685 def fast_rank(self):
683 686 repo = self._repo
684 687 if self._maybe_filtered:
685 688 cl = repo.changelog
686 689 else:
687 690 cl = repo.unfiltered().changelog
688 691 return cl.fast_rank(self._rev)
689 692
690 693 def phase(self):
691 694 return self._repo._phasecache.phase(self._repo, self._rev)
692 695
693 696 def hidden(self):
694 697 return self._rev in repoview.filterrevs(self._repo, b'visible')
695 698
696 699 def isinmemory(self):
697 700 return False
698 701
699 702 def children(self):
700 703 """return list of changectx contexts for each child changeset.
701 704
702 705 This returns only the immediate child changesets. Use descendants() to
703 706 recursively walk children.
704 707 """
705 708 c = self._repo.changelog.children(self._node)
706 709 return [self._repo[x] for x in c]
707 710
708 711 def ancestors(self):
709 712 for a in self._repo.changelog.ancestors([self._rev]):
710 713 yield self._repo[a]
711 714
712 715 def descendants(self):
713 716 """Recursively yield all children of the changeset.
714 717
715 718 For just the immediate children, use children()
716 719 """
717 720 for d in self._repo.changelog.descendants([self._rev]):
718 721 yield self._repo[d]
719 722
720 723 def filectx(self, path, fileid=None, filelog=None):
721 724 """get a file context from this changeset"""
722 725 if fileid is None:
723 726 fileid = self.filenode(path)
724 727 return filectx(
725 728 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
726 729 )
727 730
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # working context has no node: use its first parent instead
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = self._repo.nodeconstants.nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # multiple candidates: let configuration pick one if possible
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: use the revlog's answer
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]
772 775
773 776 def isancestorof(self, other):
774 777 """True if this changeset is an ancestor of other"""
775 778 return self._repo.changelog.isancestorrev(self._rev, other._rev)
776 779
777 780 def walk(self, match):
778 781 '''Generates matching file names.'''
779 782
780 783 # Wrap match.bad method to have message with nodeid
781 784 def bad(fn, msg):
782 785 # The manifest doesn't know about subrepos, so don't complain about
783 786 # paths into valid subrepos.
784 787 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
785 788 return
786 789 match.bad(fn, _(b'no such file in rev %s') % self)
787 790
788 791 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
789 792 return self._manifest.walk(m)
790 793
791 794 def matches(self, match):
792 795 return self.walk(match)
793 796
794 797
795 798 class basefilectx:
796 799 """A filecontext object represents the common logic for its children:
797 800 filectx: read-only access to a filerevision that is already present
798 801 in the repo,
799 802 workingfilectx: a filecontext that represents files from the working
800 803 directory,
801 804 memfilectx: a filecontext that represents files in-memory,
802 805 """
803 806
804 807 @propertycache
805 808 def _filelog(self):
806 809 return self._repo.file(self._path)
807 810
808 811 @propertycache
809 812 def _changeid(self):
810 813 if '_changectx' in self.__dict__:
811 814 return self._changectx.rev()
812 815 elif '_descendantrev' in self.__dict__:
813 816 # this file context was created from a revision with a known
814 817 # descendant, we can (lazily) correct for linkrev aliases
815 818 return self._adjustlinkrev(self._descendantrev)
816 819 else:
817 820 return self._filelog.linkrev(self._filerev)
818 821
819 822 @propertycache
820 823 def _filenode(self):
821 824 if '_fileid' in self.__dict__:
822 825 return self._filelog.lookup(self._fileid)
823 826 else:
824 827 return self._changectx.filenode(self._path)
825 828
826 829 @propertycache
827 830 def _filerev(self):
828 831 return self._filelog.rev(self._filenode)
829 832
830 833 @propertycache
831 834 def _repopath(self):
832 835 return self._path
833 836
834 837 def __nonzero__(self):
835 838 try:
836 839 self._filenode
837 840 return True
838 841 except error.LookupError:
839 842 # file is missing
840 843 return False
841 844
842 845 __bool__ = __nonzero__
843 846
844 847 def __bytes__(self):
845 848 try:
846 849 return b"%s@%s" % (self.path(), self._changectx)
847 850 except error.LookupError:
848 851 return b"%s@???" % self.path()
849 852
850 853 __str__ = encoding.strmethod(__bytes__)
851 854
852 855 def __repr__(self):
853 856 return "<%s %s>" % (type(self).__name__, str(self))
854 857
855 858 def __hash__(self):
856 859 try:
857 860 return hash((self._path, self._filenode))
858 861 except AttributeError:
859 862 return id(self)
860 863
861 864 def __eq__(self, other):
862 865 try:
863 866 return (
864 867 type(self) == type(other)
865 868 and self._path == other._path
866 869 and self._filenode == other._filenode
867 870 )
868 871 except AttributeError:
869 872 return False
870 873
871 874 def __ne__(self, other):
872 875 return not (self == other)
873 876
874 877 def filerev(self):
875 878 return self._filerev
876 879
877 880 def filenode(self):
878 881 return self._filenode
879 882
880 883 @propertycache
881 884 def _flags(self):
882 885 return self._changectx.flags(self._path)
883 886
884 887 def flags(self):
885 888 return self._flags
886 889
887 890 def filelog(self):
888 891 return self._filelog
889 892
890 893 def rev(self):
891 894 return self._changeid
892 895
893 896 def linkrev(self):
894 897 return self._filelog.linkrev(self._filerev)
895 898
896 899 def node(self):
897 900 return self._changectx.node()
898 901
899 902 def hex(self):
900 903 return self._changectx.hex()
901 904
902 905 def user(self):
903 906 return self._changectx.user()
904 907
905 908 def date(self):
906 909 return self._changectx.date()
907 910
908 911 def files(self):
909 912 return self._changectx.files()
910 913
911 914 def description(self):
912 915 return self._changectx.description()
913 916
914 917 def branch(self):
915 918 return self._changectx.branch()
916 919
917 920 def extra(self):
918 921 return self._changectx.extra()
919 922
920 923 def phase(self):
921 924 return self._changectx.phase()
922 925
923 926 def phasestr(self):
924 927 return self._changectx.phasestr()
925 928
926 929 def obsolete(self):
927 930 return self._changectx.obsolete()
928 931
929 932 def instabilities(self):
930 933 return self._changectx.instabilities()
931 934
932 935 def manifest(self):
933 936 return self._changectx.manifest()
934 937
935 938 def changectx(self):
936 939 return self._changectx
937 940
938 941 def renamed(self):
939 942 return self._copied
940 943
941 944 def copysource(self):
942 945 return self._copied and self._copied[0]
943 946
944 947 def repo(self):
945 948 return self._repo
946 949
947 950 def size(self):
948 951 return len(self.data())
949 952
950 953 def path(self):
951 954 return self._path
952 955
953 956 def isbinary(self):
954 957 try:
955 958 return stringutil.binary(self.data())
956 959 except IOError:
957 960 return False
958 961
959 962 def isexec(self):
960 963 return b'x' in self.flags()
961 964
962 965 def islink(self):
963 966 return b'l' in self.flags()
964 967
965 968 def isabsent(self):
966 969 """whether this filectx represents a file not in self._changectx
967 970
968 971 This is mainly for merge code to detect change/delete conflicts. This is
969 972 expected to be True for all subclasses of basectx."""
970 973 return False
971 974
972 975 _customcmp = False
973 976
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # the other side implements its own comparison: defer to it
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            # filelog.size() has two special cases:
            # - censored metadata
            # - copy/rename tracking
            # The first is detected by peeking into the delta,
            # the second is detected by abusing parent order
            # in the revlog index as flag bit. This leaves files using
            # the dummy encoding and non-standard meta attributes.
            # The following check is a special case for the empty
            # metadata block used if the raw file content starts with '\1\n'.
            # Cases of arbitrary metadata flags are currently mishandled.
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size() or self.flags() == b'l':
                # size() matches: need to compare content
                # issue6456: Always compare symlinks because size can represent
                # encrypted string for EXT-4 encryption(fscrypt).
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
1014 1017
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the source is exactly the linkrev
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # reached the floor revision without finding anything
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
1067 1070
1068 1071 def isintroducedafter(self, changelogrev):
1069 1072 """True if a filectx has been introduced after a given floor revision"""
1070 1073 if self.linkrev() >= changelogrev:
1071 1074 return True
1072 1075 introrev = self._introrev(stoprev=changelogrev)
1073 1076 if introrev is None:
1074 1077 return False
1075 1078 return introrev >= changelogrev
1076 1079
1077 1080 def introrev(self):
1078 1081 """return the rev of the changeset which introduced this file revision
1079 1082
1080 1083 This method is different from linkrev because it take into account the
1081 1084 changeset the filectx was created from. It ensures the returned
1082 1085 revision is one of its ancestors. This prevents bugs from
1083 1086 'linkrev-shadowing' when a file revision is used by multiple
1084 1087 changesets.
1085 1088 """
1086 1089 return self._introrev()
1087 1090
    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        # Probe the instance dict directly: these attributes are lazily
        # computed caches, and touching them as attributes would trigger
        # their computation.
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # no associated changeset at all: fall back to the raw linkrev
            return self.linkrev()
1116 1119
1117 1120 def introfilectx(self):
1118 1121 """Return filectx having identical contents, but pointing to the
1119 1122 changeset revision where this filectx was introduced"""
1120 1123 introrev = self.introrev()
1121 1124 if self.rev() == introrev:
1122 1125 return self
1123 1126 return self.filectx(self.filenode(), changeid=introrev)
1124 1127
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()

        The `_descendantrev`/`_ancestrycontext` attributes seeded here let
        the new filectx later resolve a correct introduction revision.
        """
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
1140 1143
1141 1144 def parents(self):
1142 1145 _path = self._path
1143 1146 fl = self._filelog
1144 1147 parents = self._filelog.parents(self._filenode)
1145 1148 pl = [
1146 1149 (_path, node, fl)
1147 1150 for node in parents
1148 1151 if node != self._repo.nodeconstants.nullid
1149 1152 ]
1150 1153
1151 1154 r = fl.renamed(self._filenode)
1152 1155 if r:
1153 1156 # - In the simple rename case, both parent are nullid, pl is empty.
1154 1157 # - In case of merge, only one of the parent is null id and should
1155 1158 # be replaced with the rename information. This parent is -always-
1156 1159 # the first one.
1157 1160 #
1158 1161 # As null id have always been filtered out in the previous list
1159 1162 # comprehension, inserting to 0 will always result in "replacing
1160 1163 # first nullid parent with rename information.
1161 1164 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1162 1165
1163 1166 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1164 1167
1165 1168 def p1(self):
1166 1169 return self.parents()[0]
1167 1170
1168 1171 def p2(self):
1169 1172 p = self.parents()
1170 1173 if len(p) == 2:
1171 1174 return p[1]
1172 1175 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1173 1176
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        # cache filelog lookups: renamed parents need them re-attached below
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            # (the bare attribute access forces the cached value to exist)
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )
1223 1226
1224 1227 def ancestors(self, followfirst=False):
1225 1228 visit = {}
1226 1229 c = self
1227 1230 if followfirst:
1228 1231 cut = 1
1229 1232 else:
1230 1233 cut = None
1231 1234
1232 1235 while True:
1233 1236 for parent in c.parents()[:cut]:
1234 1237 visit[(parent.linkrev(), parent.filenode())] = parent
1235 1238 if not visit:
1236 1239 break
1237 1240 c = visit.pop(max(visit))
1238 1241 yield c
1239 1242
1240 1243 def decodeddata(self):
1241 1244 """Returns `data()` after running repository decoding filters.
1242 1245
1243 1246 This is often equivalent to how the data would be expressed on disk.
1244 1247 """
1245 1248 return self._repo.wwritedata(self.path(), self.data())
1246 1249
1247 1250
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be provided
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        if filelog is not None:
            self._filelog = filelog

        # Only pre-seed the attributes we were actually given; the missing
        # ones are derived lazily by the propertycache'd accessors.
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the repository.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        # raw revlog payload (no filters or censorship handling applied)
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file revision content.

        Aborts on censored data unless censor.policy is set to "ignore",
        in which case an empty bytestring is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        # NOTE: may differ from len(self.data()) in some metadata cases;
        # see the discussion in basefilectx.cmp()
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # parent has the very same file revision: not a rename here
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1372 1375
1373 1376
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        # not committed yet: no revision number or node
        self._rev = None
        self._node = None
        self._text = text
        # Only pre-seed the attributes we were given; the rest fall back to
        # the propertycache'd defaults below.
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    def hex(self):
        # Fix: the original was missing the `return` and always yielded
        # None; mirror workingctx.hex() which returns the wdir pseudo node.
        return self._repo.nodeconstants.wdirhex

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # default: full working copy status from the repository
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin the commit date
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Return the sorted list of files touched by this context."""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # aliases used by the commit machinery
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # never lower than the configured new-commit phase or any parent's
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags of ``path`` (b'' when unknown or missing)."""
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # yield the parents first, then the rest of the ancestry
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1541 1544
1542 1545
1543 1546 class workingctx(committablectx):
1544 1547 """A workingctx object makes access to data related to
1545 1548 the current working directory convenient.
1546 1549 date - any valid date string or (unixtime, offset), or None.
1547 1550 user - username string, or None.
1548 1551 extra - a dictionary of extra values, or None.
1549 1552 changes - a list of file lists as returned by localrepo.status()
1550 1553 or None to use the repository status.
1551 1554 """
1552 1555
1553 1556 def __init__(
1554 1557 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1555 1558 ):
1556 1559 branch = None
1557 1560 if not extra or b'branch' not in extra:
1558 1561 try:
1559 1562 branch = repo.dirstate.branch()
1560 1563 except UnicodeDecodeError:
1561 1564 raise error.Abort(_(b'branch name not in UTF-8!'))
1562 1565 super(workingctx, self).__init__(
1563 1566 repo, text, user, date, extra, changes, branch=branch
1564 1567 )
1565 1568
1566 1569 def __iter__(self):
1567 1570 d = self._repo.dirstate
1568 1571 for f in d:
1569 1572 if d.get_entry(f).tracked:
1570 1573 yield f
1571 1574
1572 1575 def __contains__(self, key):
1573 1576 return self._repo.dirstate.get_entry(key).tracked
1574 1577
1575 1578 def hex(self):
1576 1579 return self._repo.nodeconstants.wdirhex
1577 1580
1578 1581 @propertycache
1579 1582 def _parents(self):
1580 1583 p = self._repo.dirstate.parents()
1581 1584 if p[1] == self._repo.nodeconstants.nullid:
1582 1585 p = p[:-1]
1583 1586 # use unfiltered repo to delay/avoid loading obsmarkers
1584 1587 unfi = self._repo.unfiltered()
1585 1588 return [
1586 1589 changectx(
1587 1590 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1588 1591 )
1589 1592 for n in p
1590 1593 ]
1591 1594
    def setparents(self, p1node, p2node=None):
        """Set the working directory parents, adjusting dirstate copy
        records accordingly.

        ``p2node`` defaults to the null node (single-parent case).
        """
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        dirstate = self._repo.dirstate
        with dirstate.changing_parents(self._repo):
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == self._repo.nodeconstants.nullid:
                # single parent: drop copy records whose source is no longer
                # in the (new) first parent either
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)
1610 1613
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        # NOTE: the bare attribute access is intentional -- it forces the
        # propertycache'd manifest into __dict__ before the base class uses it
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1615 1618
    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                # follow a copy record back to its source, if any
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                # both parents agree: take that flag; otherwise prefer the
                # side that changed relative to the ancestor
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return b''  # punt for conflicts

        return func
1649 1652
1650 1653 @propertycache
1651 1654 def _flagfunc(self):
1652 1655 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1653 1656
1654 1657 def flags(self, path):
1655 1658 try:
1656 1659 return self._flagfunc(path)
1657 1660 except OSError:
1658 1661 return b''
1659 1662
1660 1663 def filectx(self, path, filelog=None):
1661 1664 """get a file context from the working directory"""
1662 1665 return workingfilectx(
1663 1666 self._repo, path, workingctx=self, filelog=filelog
1664 1667 )
1665 1668
1666 1669 def dirty(self, missing=False, merge=True, branch=True):
1667 1670 """check whether a working directory is modified"""
1668 1671 # check subrepos first
1669 1672 for s in sorted(self.substate):
1670 1673 if self.sub(s).dirty(missing=missing):
1671 1674 return True
1672 1675 # check current working dir
1673 1676 return (
1674 1677 (merge and self.p2())
1675 1678 or (branch and self.branch() != self.p1().branch())
1676 1679 or self.modified()
1677 1680 or self.added()
1678 1681 or self.removed()
1679 1682 or (missing and self.deleted())
1680 1683 )
1681 1684
1682 1685 def add(self, list, prefix=b""):
1683 1686 with self._repo.wlock():
1684 1687 ui, ds = self._repo.ui, self._repo.dirstate
1685 1688 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1686 1689 rejected = []
1687 1690 lstat = self._repo.wvfs.lstat
1688 1691 for f in list:
1689 1692 # ds.pathto() returns an absolute file when this is invoked from
1690 1693 # the keyword extension. That gets flagged as non-portable on
1691 1694 # Windows, since it contains the drive letter and colon.
1692 1695 scmutil.checkportable(ui, os.path.join(prefix, f))
1693 1696 try:
1694 1697 st = lstat(f)
1695 1698 except OSError:
1696 1699 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1697 1700 rejected.append(f)
1698 1701 continue
1699 1702 limit = ui.configbytes(b'ui', b'large-file-limit')
1700 1703 if limit != 0 and st.st_size > limit:
1701 1704 ui.warn(
1702 1705 _(
1703 1706 b"%s: up to %d MB of RAM may be required "
1704 1707 b"to manage this file\n"
1705 1708 b"(use 'hg revert %s' to cancel the "
1706 1709 b"pending addition)\n"
1707 1710 )
1708 1711 % (f, 3 * st.st_size // 1000000, uipath(f))
1709 1712 )
1710 1713 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1711 1714 ui.warn(
1712 1715 _(
1713 1716 b"%s not added: only files and symlinks "
1714 1717 b"supported currently\n"
1715 1718 )
1716 1719 % uipath(f)
1717 1720 )
1718 1721 rejected.append(f)
1719 1722 elif not ds.set_tracked(f):
1720 1723 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1721 1724 return rejected
1722 1725
1723 1726 def forget(self, files, prefix=b""):
1724 1727 with self._repo.wlock():
1725 1728 ds = self._repo.dirstate
1726 1729 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1727 1730 rejected = []
1728 1731 for f in files:
1729 1732 if not ds.set_untracked(f):
1730 1733 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1731 1734 rejected.append(f)
1732 1735 return rejected
1733 1736
1734 1737 def copy(self, source, dest):
1735 1738 try:
1736 1739 st = self._repo.wvfs.lstat(dest)
1737 1740 except FileNotFoundError:
1738 1741 self._repo.ui.warn(
1739 1742 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1740 1743 )
1741 1744 return
1742 1745 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1743 1746 self._repo.ui.warn(
1744 1747 _(b"copy failed: %s is not a file or a symbolic link\n")
1745 1748 % self._repo.dirstate.pathto(dest)
1746 1749 )
1747 1750 else:
1748 1751 with self._repo.wlock():
1749 1752 ds = self._repo.dirstate
1750 1753 ds.set_tracked(dest)
1751 1754 ds.copy(source, dest)
1752 1755
1753 1756 def match(
1754 1757 self,
1755 1758 pats=None,
1756 1759 include=None,
1757 1760 exclude=None,
1758 1761 default=b'glob',
1759 1762 listsubrepos=False,
1760 1763 badfn=None,
1761 1764 cwd=None,
1762 1765 ):
1763 1766 r = self._repo
1764 1767 if not cwd:
1765 1768 cwd = r.getcwd()
1766 1769
1767 1770 # Only a case insensitive filesystem needs magic to translate user input
1768 1771 # to actual case in the filesystem.
1769 1772 icasefs = not util.fscasesensitive(r.root)
1770 1773 return matchmod.match(
1771 1774 r.root,
1772 1775 cwd,
1773 1776 pats,
1774 1777 include,
1775 1778 exclude,
1776 1779 default,
1777 1780 auditor=r.auditor,
1778 1781 ctx=self,
1779 1782 listsubrepos=listsubrepos,
1780 1783 badfn=badfn,
1781 1784 icasefs=icasefs,
1782 1785 )
1783 1786
1784 1787 def _filtersuspectsymlink(self, files):
1785 1788 if not files or self._repo.dirstate._checklink:
1786 1789 return files
1787 1790
1788 1791 # Symlink placeholders may get non-symlink-like contents
1789 1792 # via user error or dereferencing by NFS or Samba servers,
1790 1793 # so we filter out any placeholders that don't look like a
1791 1794 # symlink
1792 1795 sane = []
1793 1796 for f in files:
1794 1797 if self.flags(f) == b'l':
1795 1798 d = self[f].data()
1796 1799 if (
1797 1800 d == b''
1798 1801 or len(d) >= 1024
1799 1802 or b'\n' in d
1800 1803 or stringutil.binary(d)
1801 1804 ):
1802 1805 self._repo.ui.debug(
1803 1806 b'ignoring suspect symlink placeholder "%s"\n' % f
1804 1807 )
1805 1808 continue
1806 1809 sane.append(f)
1807 1810 return sane
1808 1811
    def _checklookup(self, files, mtime_boundary):
        """Re-examine files the dirstate could not decide about.

        Returns a ``(modified, deleted, clean, fixup)`` tuple, where
        ``fixup`` lists ``(filename, cache_info)`` pairs for files that
        turned out clean and whose dirstate entry can be refreshed.
        """
        # check for any possibly clean files
        if not files:
            return [], [], [], []

        modified = []
        deleted = []
        clean = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                elif mtime_boundary is None:
                    clean.append(f)
                else:
                    s = self[f].lstat()
                    mode = s.st_mode
                    size = s.st_size
                    # only cache the mtime when it is reliably in the past
                    file_mtime = timestamp.reliable_mtime_of(s, mtime_boundary)
                    if file_mtime is not None:
                        cache_info = (mode, size, file_mtime)
                        fixup.append((f, cache_info))
                    else:
                        clean.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, clean, fixup
1852 1855
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        ``fixup`` is the list of ``(filename, cache_info)`` pairs produced
        by ``_checklookup``. Also runs any registered post-status hooks.
        """
        testing.wait_on_cfg(self._repo.ui, b'status.pre-dirstate-write-file')
        dirstate = self._repo.dirstate
        poststatus = self._repo.postdsstatus()
        if fixup:
            if dirstate.is_changing_parents:
                normal = lambda f, pfd: dirstate.update_file(
                    f,
                    p1_tracked=True,
                    wc_tracked=True,
                )
            else:
                normal = dirstate.set_clean
            for f, pdf in fixup:
                normal(f, pdf)
        if poststatus or self._repo.dirstate._dirty:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                pre_dirty = dirstate._dirty
                with self._repo.wlock(False):
                    assert self._repo.dirstate is dirstate
                    post_dirty = dirstate._dirty
                    if post_dirty:
                        tr = self._repo.currenttransaction()
                        dirstate.write(tr)
                    elif pre_dirty:
                        # the wlock grabbing detected that dirstate changes
                        # needed to be dropped
                        m = b'skip updating dirstate: identity mismatch\n'
                        self._repo.ui.debug(m)
                    if poststatus:
                        for ps in poststatus:
                            ps(self, status)
            except error.LockError:
                # could not take the lock: throw away the pending updates
                dirstate.invalidate()
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1895 1898
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        dirstate = self._repo.dirstate
        with dirstate.running_status(self._repo):
            # `cmp` lists files the dirstate could not decide about; they
            # need a content comparison (see _checklookup below)
            cmp, s, mtime_boundary = dirstate.status(
                match, subrepos, ignored=ignored, clean=clean, unknown=unknown
            )

            # check for any possibly clean files
            fixup = []
            if cmp:
                modified2, deleted2, clean_set, fixup = self._checklookup(
                    cmp, mtime_boundary
                )
                s.modified.extend(modified2)
                s.deleted.extend(deleted2)

                if clean_set and clean:
                    s.clean.extend(clean_set)
                if fixup and clean:
                    s.clean.extend((f for f, _ in fixup))

            self._poststatusfixup(s, fixup)

            if match.always():
                # cache for performance
                if s.unknown or s.ignored or s.clean:
                    # "_status" is cached with list*=False in the normal route
                    self._status = scmutil.status(
                        s.modified, s.added, s.removed, s.deleted, [], [], []
                    )
                else:
                    self._status = s

        return s
1934 1937
    @propertycache
    def _copies(self):
        """Return (p1copies, p2copies): {dst: src} copy maps vs each parent.

        Only copies whose destination is added/modified in the working
        directory and inside the narrowspec are reported; each copy is
        attributed to the first parent whose manifest contains the source
        (p1 wins when the source exists in both).
        """
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        changedset = set(self.added()) | set(self.modified())
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if dst not in changedset or not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies
1952 1955
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        # note: accessing self._status may trigger a dirstate status run
        return self._buildstatusmanifest(self._status)
1963 1966
1964 1967 def _buildstatusmanifest(self, status):
1965 1968 """Builds a manifest that includes the given status results."""
1966 1969 parents = self.parents()
1967 1970
1968 1971 man = parents[0].manifest().copy()
1969 1972
1970 1973 ff = self._flagfunc
1971 1974 for i, l in (
1972 1975 (self._repo.nodeconstants.addednodeid, status.added),
1973 1976 (self._repo.nodeconstants.modifiednodeid, status.modified),
1974 1977 ):
1975 1978 for f in l:
1976 1979 man[f] = i
1977 1980 try:
1978 1981 man.setflag(f, ff(f))
1979 1982 except OSError:
1980 1983 pass
1981 1984
1982 1985 for f in status.deleted + status.removed:
1983 1986 if f in man:
1984 1987 del man[f]
1985 1988
1986 1989 return man
1987 1990
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # always recompute against the dirstate; the incoming `s` is ignored
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # slow path: delegate the manifest comparison to the base class
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
2008 2011
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            # mutate the caller's matcher in place; it is returned for
            # convenience
            match.bad = bad
        return match
2031 2034
2032 2035 def walk(self, match):
2033 2036 '''Generates matching file names.'''
2034 2037 return sorted(
2035 2038 self._repo.dirstate.walk(
2036 2039 self._repo.narrowmatch(match),
2037 2040 subrepos=sorted(self.substate),
2038 2041 unknown=True,
2039 2042 ignored=False,
2040 2043 )
2041 2044 )
2042 2045
2043 2046 def matches(self, match):
2044 2047 match = self._repo.narrowmatch(match)
2045 2048 ds = self._repo.dirstate
2046 2049 return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)
2047 2050
    def markcommitted(self, node):
        """Record in the dirstate that this context's changes were committed
        as ``node``, then make ``node`` the dirstate parent.
        """
        with self._repo.dirstate.changing_parents(self._repo):
            # committed files become cleanly tracked against the new parent
            for f in self.modified() + self.added():
                self._repo.dirstate.update_file(
                    f, p1_tracked=True, wc_tracked=True
                )
            # removed files are forgotten entirely
            for f in self.removed():
                self._repo.dirstate.update_file(
                    f, p1_tracked=False, wc_tracked=False
                )
            self._repo.dirstate.setparents(node)
        self._repo._quick_access_changeid_invalidate()

        sparse.aftercommit(self._repo, node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
2067 2070
2068 2071 def mergestate(self, clean=False):
2069 2072 if clean:
2070 2073 return mergestatemod.mergestate.clean(self._repo)
2071 2074 return mergestatemod.mergestate.read(self._repo)
2072 2075
2073 2076
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        # `filelog` and `ctx` are optional precomputed values; when omitted
        # the corresponding propertycache on the subclass supplies them.
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if this file was copied,
        else None.  The filenode is nullid when the source is not in p1."""
        path = self.copysource()
        if not path:
            return None
        return (
            path,
            self._changectx._parents[0]._manifest.get(
                path, self._repo.nodeconstants.nullid
            ),
        )

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            # nullid when the path is absent from that parent's manifest
            return ctx._manifest.get(path, self._repo.nodeconstants.nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # copy source stands in for the first parent; filelog unknown
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents where the file doesn't exist (nullid)
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != self._repo.nodeconstants.nullid
        ]

    def children(self):
        # uncommitted contexts can have no committed children
        return []
2136 2139
2137 2140
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Return the file's content as read from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the dirstate-recorded copy source path, or None."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return (mtime, tz) of the on-disk file; falls back to the
        changectx date when the file is missing."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except FileNotFoundError:
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # like exists() but does not follow a symlink
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            # remove any file occupying a leading directory component of f
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # l: symlink flag, x: executable flag
        self._repo.wvfs.setflags(self._path, l, x)
2228 2231
2229 2232
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # initializes the empty cache and mergestate
        self.clean()

    def setbase(self, wrappedctx):
        """Set the context this overlay is stacked on top of."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=None):
        # p1 must remain the wrapped context; only p2 may be chosen freely
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        """Return file content from the cache, falling back to the wrapped
        context; raises ProgrammingError for a cached deletion."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # parent manifest plus sentinel nodeids for files touched in-memory
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = self._repo.nodeconstants.addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = self._repo.nodeconstants.modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # flags come straight from the cache; only valid for dirty paths
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        """Return the sorted list of all paths touched in this overlay."""
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # cached-existing files that also exist in the parent
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # cached-existing files absent from the parent
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # cached-deleted files that exist in the parent
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        """Return {dst: src} copies recorded in the cache (narrow-filtered)."""
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        # NOTE(review): body is identical to p1copies() — all cached copies
        # are reported against both parents; confirm this is intended.
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        """Return the cached date for a dirty path, else the wrapped one."""
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record in the cache that ``path`` is a copy of ``origin``."""
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        """Return the recorded copy source for a dirty path, else None."""
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                # cached deletion: flags make no sense
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in range(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        """Record new content for ``path`` in the in-memory cache."""
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        # symlink ('l') takes precedence over executable ('x')
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        """Record a deletion of ``path`` in the cache."""
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                # cached deletion: size is undefined
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        """Like tomemctx(), reusing ``precursor``'s metadata and recording
        it as the amend source."""
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        # a path is "dirty" as soon as it has a cache entry, even if the
        # cached content later turns out identical (see _compact())
        return path in self._cache

    def clean(self):
        """Discard all cached changes and the in-memory merge state."""
        self._mergestate = None
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [
                (
                    self.p1().rev(),
                    scmutil.matchfiles(self.repo(), self._cache.keys()),
                )
            ],
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        # delete after iterating to avoid mutating the dict being walked
        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }
        # the cached manifest no longer reflects the cache content
        util.clearcachedproperty(self, b'_manifest')

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )

    def mergestate(self, clean=False):
        # lazily create (or reset) the in-memory merge state
        if clean or self._mergestate is None:
            self._mergestate = mergestatemod.memmergestate(self._repo)
        return self._mergestate
2647 2650
2648 2651
class overlayworkingfilectx(committablefilectx):
    """A file context that delegates all reads and writes to its parent
    ``overlayworkingctx``'s in-memory cache, which can be flushed through
    later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        # `parent` is the owning overlayworkingctx; every accessor below
        # forwards to it.
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True when the contents differ
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # nothing to audit for an in-memory file
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # `backgroundclose` is meaningless in-memory and is dropped
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no working directory, so nothing can conflict
        pass
2703 2706
2704 2707
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        # `changes` is the precomputed status of the files being committed
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2747 2750
2748 2751
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    results = {}

    def getfilectx(repo, memctx, path):
        # memoize on path only; repo/memctx are the same for every call
        try:
            return results[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            results[path] = fctx
            return fctx

    return getfilectx
2764 2767
2765 2768
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        # read content, flags and copy info from the source context
        fctx = ctx[path]
        copysource = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=copysource,
        )

    return getfilectx
2787 2790
2788 2791
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # the patch deletes this file; memctx records deletions as None
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2811 2814
2812 2815
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while the
    related files data is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        # an in-memory revision has no revision number or node until committed
        self._rev = None
        self._node = None
        # map each missing (None) parent to the null node id
        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = self._repo.nodeconstants.modifiednodeid

        for f in self._status.added:
            man[f] = self._repo.nodeconstants.addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # tracked in neither parent: this is an addition
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: content change
                modified.append(f)
            else:
                # filectxfn returned None: the file is removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

    def parents(self):
        # hide the null second parent, mirroring changectx.parents()
        if self._parents[1].rev() == nullrev:
            return [self._parents[0]]
        return self._parents
2943 2946
2944 2947
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was
        copied in the revision being committed, or None.
        """
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # encode the link/exec bits as a manifest flags string
        self._flags = b'l' if islink else b'x' if isexec else b''
        self._copysource = copysource

    def copysource(self):
        """Return the recorded copy source path, or None."""
        return self._copysource

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2995 2998
2996 2999
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'originalctx' is the original revision whose manifest
    we're reusing, 'parents' is a sequence of two parent revisions identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            # default to reusing the original commit message
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # an in-memory revision has no revision number or node until committed
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            # pad missing parents with the null revision
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node of the manifest reused from ``originalctx``."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file data is served from the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3105 3108
3106 3109
class arbitraryfilectx:
    """Filectx-like wrapper for a file at an arbitrary on-disk location,
    possibly outside the working directory.
    """

    def __init__(self, path, repo=None):
        # repo is optional because contrib/simplemerge uses this class
        self._path = path
        self._repo = repo

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so only take
        # the fast path when neither side is a symlink.
        has_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        if not has_symlink and self._repo and isinstance(fctx, workingfilectx):
            # Fast path for merge when both sides are disk-backed. Note
            # that filecmp returns the opposite of our cmp convention:
            # True when the files are the same, not when they differ.
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        """Return the on-disk path this context wraps."""
        return self._path

    def flags(self):
        # files outside a repository carry no manifest flags
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        # no repo filters apply here, so decoded data is the raw data
        return util.readfile(self._path)

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        util.writefile(self._path, data)
General Comments 0
You need to be logged in to leave comments. Login now