##// END OF EJS Templates
changectx: use unfiltered changelog to access parents of unfiltered revs...
marmoute -
r44203:f9068413 default
parent child Browse files
Show More
@@ -1,3011 +1,3016 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from .pycompat import (
27 27 getattr,
28 28 open,
29 29 )
30 30 from . import (
31 31 copies,
32 32 dagop,
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 obsolete as obsmod,
38 38 patch,
39 39 pathutil,
40 40 phases,
41 41 pycompat,
42 42 repoview,
43 43 scmutil,
44 44 sparse,
45 45 subrepo,
46 46 subrepoutil,
47 47 util,
48 48 )
49 49 from .utils import (
50 50 dateutil,
51 51 stringutil,
52 52 )
53 53
54 54 propertycache = util.propertycache
55 55
56 56
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # short hex of the node, e.g. b'f9068413'
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts compare equal only when they are of the exact same class
        # and reference the same revision number.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            # other (or self, for subclasses without _rev) is not a context
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership is file-path membership in this context's manifest
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> filectx for that path in this context
        return self.filectx(key)

    def __iter__(self):
        # iterate over the file paths tracked in this context
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                # already reported as deleted; don't double-report
                continue
            if value is None:
                # mf1.diff() reports None for clean entries when clean=True
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # subrepo state for this context, parsed lazily via subrepoutil.state
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # revision recorded for a subrepo path in this context's substate
        return self.substate[subpath][1]

    def rev(self):
        """Return the revision number of this context."""
        return self._rev

    def node(self):
        """Return the binary node id of this context."""
        return self._node

    def hex(self):
        """Return the full hex node id of this context."""
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        # human-readable phase name (e.g. b'public', b'draft')
        return phases.phasenames[self.phase()]

    def mutable(self):
        # anything past the public phase may still be rewritten
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        """Return a matcher for the given fileset expression."""
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        """Return the first parent context."""
        return self._parents[0]

    def p2(self):
        """Return the second parent context, or the null context if there is
        only one parent."""
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """Return (filenode, flags) for path, raising ManifestLookupError if
        the path is not present in this context's manifest."""
        if '_manifest' in self.__dict__:
            # full manifest already loaded: use it directly
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            # path was touched by this changeset: the (cheaper) manifest
            # delta is enough to answer the lookup
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # returns b'' for paths missing from the manifest rather than raising
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        # (p1copies, p2copies) computed from filelog/changelog data
        return copies.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
    ):
        """Build a matcher for the given patterns, rooted at the repo root."""
        r = self._repo
        return matchmod.match(
            r.root,
            r.getcwd(),
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            # default: diff against the first parent
            ctx2 = self.p1()
        if ctx2 is not None:
            # accept anything repo[...] can resolve (rev, node, context)
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        # NOTE(review): this local shadows the builtin reversed() within
        # this method.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                # fold the subrepo's results into ours, prefixing paths with
                # the subrepo path
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r
474 474
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            # no revision assigned yet; fall back to object identity
            return id(self)

    def __nonzero__(self):
        # a context is falsy only for the null revision
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        if self._maybe_filtered:
            repo = self._repo
        else:
            # the rev is known to be unfiltered: skip the repoview layer
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        # use the unfiltered changelog when the rev is known unfiltered, so
        # parent lookup does not pay the filtering cost
        if self._maybe_filtered:
            cl = repo.changelog
        else:
            cl = repo.unfiltered().changelog

        p1, p2 = cl.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        """Return the raw changeset tuple
        (manifest, user, date, files, description, extra)."""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        # files touched by this changeset that were neither added nor removed
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        """Return the list of files added by this changeset, reading it from
        changelog data or recomputing it depending on configuration."""
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = copies.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded

    def filesremoved(self):
        """Return the list of files removed by this changeset, reading it from
        changelog data or recomputing it depending on configuration."""
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = copies.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # Otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        # True when the rev is filtered out of the 'visible' repoview
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        """Yield a changectx for each ancestor changeset."""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: use the revlog ancestor
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
758 763
759 764 class basefilectx(object):
760 765 """A filecontext object represents the common logic for its children:
761 766 filectx: read-only access to a filerevision that is already present
762 767 in the repo,
763 768 workingfilectx: a filecontext that represents files from the working
764 769 directory,
765 770 memfilectx: a filecontext that represents files in-memory,
766 771 """
767 772
    @propertycache
    def _filelog(self):
        # filelog (revlog of this file's revisions) for self._path
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        """Changelog revision this file context is associated with."""
        if '_changectx' in self.__dict__:
            # created from a known changeset: use its revision directly
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            # fall back to the (possibly aliased) linkrev stored in the filelog
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        """Binary node id of this file revision."""
        if '_fileid' in self.__dict__:
            # resolve whatever id form the caller supplied through the filelog
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # filelog-local revision number of this file revision
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path
797 802
    def __nonzero__(self):
        # a file context is truthy iff the file revision actually exists
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # changeset lookup failed; still show the path
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # hash on (path, filenode) to stay consistent with __eq__ below
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # equal iff same concrete class, same path and same file revision
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
837 842
    def filerev(self):
        """Return the filelog-local revision number."""
        return self._filerev

    def filenode(self):
        """Return the binary file node id."""
        return self._filenode

    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)

    def flags(self):
        # b'x' (executable), b'l' (symlink) or b''
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        # changelog revision (possibly linkrev-adjusted, see _changeid)
        return self._changeid

    def linkrev(self):
        # raw linkrev as stored in the filelog; may alias another changeset
        # that reuses the same file revision (see _adjustlinkrev)
        return self._filelog.linkrev(self._filerev)

    # The following accessors delegate to the associated changectx.
    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        # copy/rename source info, or a false value when not copied
        return self._copied

    def copysource(self):
        # path this file was copied from, or a false value when not copied
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            # unreadable content: treat as non-binary
            return False

    def isexec(self):
        return b'x' in self.flags()

    def islink(self):
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    # subclasses set this to True when they implement a custom cmp() that
    # must take priority (see cmp() below)
    _customcmp = False
937 942
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # let the other side's specialized comparison take over
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            # fctx is not revlog-backed (e.g. working directory content):
            # fall back to content comparison where sizes are inconclusive
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
966 971
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        # linkrevs point into the unfiltered changelog, so walk it unfiltered
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # linkrev already points at the source revision: nothing to adjust
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # hit the floor without finding an introduction
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
1019 1024
1020 1025 def isintroducedafter(self, changelogrev):
1021 1026 """True if a filectx has been introduced after a given floor revision
1022 1027 """
1023 1028 if self.linkrev() >= changelogrev:
1024 1029 return True
1025 1030 introrev = self._introrev(stoprev=changelogrev)
1026 1031 if introrev is None:
1027 1032 return False
1028 1033 return introrev >= changelogrev
1029 1034
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        # delegate to _introrev() without any stoprev bound
        return self._introrev()
1040 1045
1041 1046 def _introrev(self, stoprev=None):
1042 1047 """
1043 1048 Same as `introrev` but, with an extra argument to limit changelog
1044 1049 iteration range in some internal usecase.
1045 1050
1046 1051 If `stoprev` is set, the `introrev` will not be searched past that
1047 1052 `stoprev` revision and "None" might be returned. This is useful to
1048 1053 limit the iteration range.
1049 1054 """
1050 1055 toprev = None
1051 1056 attrs = vars(self)
1052 1057 if '_changeid' in attrs:
1053 1058 # We have a cached value already
1054 1059 toprev = self._changeid
1055 1060 elif '_changectx' in attrs:
1056 1061 # We know which changelog entry we are coming from
1057 1062 toprev = self._changectx.rev()
1058 1063
1059 1064 if toprev is not None:
1060 1065 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1061 1066 elif '_descendantrev' in attrs:
1062 1067 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1063 1068 # be nice and cache the result of the computation
1064 1069 if introrev is not None:
1065 1070 self._changeid = introrev
1066 1071 return introrev
1067 1072 else:
1068 1073 return self.linkrev()
1069 1074
1070 1075 def introfilectx(self):
1071 1076 """Return filectx having identical contents, but pointing to the
1072 1077 changeset revision where this filectx was introduced"""
1073 1078 introrev = self.introrev()
1074 1079 if self.rev() == introrev:
1075 1080 return self
1076 1081 return self.filectx(self.filenode(), changeid=introrev)
1077 1082
1078 1083 def _parentfilectx(self, path, fileid, filelog):
1079 1084 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1080 1085 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1081 1086 if '_changeid' in vars(self) or '_changectx' in vars(self):
1082 1087 # If self is associated with a changeset (probably explicitly
1083 1088 # fed), ensure the created filectx is associated with a
1084 1089 # changeset that is an ancestor of self.changectx.
1085 1090 # This lets us later use _adjustlinkrev to get a correct link.
1086 1091 fctx._descendantrev = self.rev()
1087 1092 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1088 1093 elif '_descendantrev' in vars(self):
1089 1094 # Otherwise propagate _descendantrev if we have one associated.
1090 1095 fctx._descendantrev = self._descendantrev
1091 1096 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1092 1097 return fctx
1093 1098
1094 1099 def parents(self):
1095 1100 _path = self._path
1096 1101 fl = self._filelog
1097 1102 parents = self._filelog.parents(self._filenode)
1098 1103 pl = [(_path, node, fl) for node in parents if node != nullid]
1099 1104
1100 1105 r = fl.renamed(self._filenode)
1101 1106 if r:
1102 1107 # - In the simple rename case, both parent are nullid, pl is empty.
1103 1108 # - In case of merge, only one of the parent is null id and should
1104 1109 # be replaced with the rename information. This parent is -always-
1105 1110 # the first one.
1106 1111 #
1107 1112 # As null id have always been filtered out in the previous list
1108 1113 # comprehension, inserting to 0 will always result in "replacing
1109 1114 # first nullid parent with rename information.
1110 1115 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1111 1116
1112 1117 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1113 1118
    def p1(self):
        """return the first parent filectx"""
        return self.parents()[0]
1116 1121
    def p2(self):
        """return the second parent filectx, or a null-revision filectx

        When there is no second parent, a filectx for this file's null
        revision (fileid=-1) is synthesized instead.
        """
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1122 1127
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        # memoize filelog lookups; renamed parents share this cache below
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            # NOTE: the bare attribute access below is intentional - it forces
            # the lazy changeid to be computed and cached now.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )
1170 1175
1171 1176 def ancestors(self, followfirst=False):
1172 1177 visit = {}
1173 1178 c = self
1174 1179 if followfirst:
1175 1180 cut = 1
1176 1181 else:
1177 1182 cut = None
1178 1183
1179 1184 while True:
1180 1185 for parent in c.parents()[:cut]:
1181 1186 visit[(parent.linkrev(), parent.filenode())] = parent
1182 1187 if not visit:
1183 1188 break
1184 1189 c = visit.pop(max(visit))
1185 1190 yield c
1186 1191
1187 1192 def decodeddata(self):
1188 1193 """Returns `data()` after running repository decoding filters.
1189 1194
1190 1195 This is often equivalent to how the data would be expressed on disk.
1191 1196 """
1192 1197 return self._repo.wwritedata(self.path(), self.data())
1193 1198
1194 1199
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of identifying the revision must be provided
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog

        # seed only the lazy attributes we were handed; the rest are
        # propertycache members computed on first access
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """return the unprocessed revlog data for this file revision"""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """return the file content, honoring the censor policy"""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # censored content is replaced by b"" when policy is "ignore",
            # otherwise reading it is a hard error
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """return the size of this file revision as stored in the filelog"""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # if a parent already holds this exact file revision, the
                # rename is not attributable to this changeset
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1318 1323
1319 1324
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        # only override the lazy (propertycache) defaults when explicit
        # values were provided
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        # rendered as "<first parent>+" to mark uncommitted state
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # default status: working directory against its parent
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # honor devel.default-date for reproducible test runs
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        # an uncommitted context has no recorded subrepo revision
        return None

    def manifestnode(self):
        # no manifest has been written yet for an uncommitted context
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """return sorted list of files changed in this context"""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # aliases matching the basectx naming convention
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        # uncommitted changesets carry no tags
        return []

    def bookmarks(self):
        # aggregate the bookmarks of every parent
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft  # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # yield the parents themselves first, then their changelog ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """
        # intentionally a no-op here; subclasses override as needed

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1472 1477
1473 1478
1474 1479 class workingctx(committablectx):
1475 1480 """A workingctx object makes access to data related to
1476 1481 the current working directory convenient.
1477 1482 date - any valid date string or (unixtime, offset), or None.
1478 1483 user - username string, or None.
1479 1484 extra - a dictionary of extra values, or None.
1480 1485 changes - a list of file lists as returned by localrepo.status()
1481 1486 or None to use the repository status.
1482 1487 """
1483 1488
1484 1489 def __init__(
1485 1490 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1486 1491 ):
1487 1492 branch = None
1488 1493 if not extra or b'branch' not in extra:
1489 1494 try:
1490 1495 branch = repo.dirstate.branch()
1491 1496 except UnicodeDecodeError:
1492 1497 raise error.Abort(_(b'branch name not in UTF-8!'))
1493 1498 super(workingctx, self).__init__(
1494 1499 repo, text, user, date, extra, changes, branch=branch
1495 1500 )
1496 1501
1497 1502 def __iter__(self):
1498 1503 d = self._repo.dirstate
1499 1504 for f in d:
1500 1505 if d[f] != b'r':
1501 1506 yield f
1502 1507
1503 1508 def __contains__(self, key):
1504 1509 return self._repo.dirstate[key] not in b"?r"
1505 1510
1506 1511 def hex(self):
1507 1512 return wdirhex
1508 1513
1509 1514 @propertycache
1510 1515 def _parents(self):
1511 1516 p = self._repo.dirstate.parents()
1512 1517 if p[1] == nullid:
1513 1518 p = p[:-1]
1514 1519 # use unfiltered repo to delay/avoid loading obsmarkers
1515 1520 unfi = self._repo.unfiltered()
1516 1521 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1517 1522
1518 1523 def _fileinfo(self, path):
1519 1524 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1520 1525 self._manifest
1521 1526 return super(workingctx, self)._fileinfo(path)
1522 1527
1523 1528 def _buildflagfunc(self):
1524 1529 # Create a fallback function for getting file flags when the
1525 1530 # filesystem doesn't support them
1526 1531
1527 1532 copiesget = self._repo.dirstate.copies().get
1528 1533 parents = self.parents()
1529 1534 if len(parents) < 2:
1530 1535 # when we have one parent, it's easy: copy from parent
1531 1536 man = parents[0].manifest()
1532 1537
1533 1538 def func(f):
1534 1539 f = copiesget(f, f)
1535 1540 return man.flags(f)
1536 1541
1537 1542 else:
1538 1543 # merges are tricky: we try to reconstruct the unstored
1539 1544 # result from the merge (issue1802)
1540 1545 p1, p2 = parents
1541 1546 pa = p1.ancestor(p2)
1542 1547 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1543 1548
1544 1549 def func(f):
1545 1550 f = copiesget(f, f) # may be wrong for merges with copies
1546 1551 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1547 1552 if fl1 == fl2:
1548 1553 return fl1
1549 1554 if fl1 == fla:
1550 1555 return fl2
1551 1556 if fl2 == fla:
1552 1557 return fl1
1553 1558 return b'' # punt for conflicts
1554 1559
1555 1560 return func
1556 1561
1557 1562 @propertycache
1558 1563 def _flagfunc(self):
1559 1564 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1560 1565
1561 1566 def flags(self, path):
1562 1567 if '_manifest' in self.__dict__:
1563 1568 try:
1564 1569 return self._manifest.flags(path)
1565 1570 except KeyError:
1566 1571 return b''
1567 1572
1568 1573 try:
1569 1574 return self._flagfunc(path)
1570 1575 except OSError:
1571 1576 return b''
1572 1577
1573 1578 def filectx(self, path, filelog=None):
1574 1579 """get a file context from the working directory"""
1575 1580 return workingfilectx(
1576 1581 self._repo, path, workingctx=self, filelog=filelog
1577 1582 )
1578 1583
1579 1584 def dirty(self, missing=False, merge=True, branch=True):
1580 1585 b"check whether a working directory is modified"
1581 1586 # check subrepos first
1582 1587 for s in sorted(self.substate):
1583 1588 if self.sub(s).dirty(missing=missing):
1584 1589 return True
1585 1590 # check current working dir
1586 1591 return (
1587 1592 (merge and self.p2())
1588 1593 or (branch and self.branch() != self.p1().branch())
1589 1594 or self.modified()
1590 1595 or self.added()
1591 1596 or self.removed()
1592 1597 or (missing and self.deleted())
1593 1598 )
1594 1599
1595 1600 def add(self, list, prefix=b""):
1596 1601 with self._repo.wlock():
1597 1602 ui, ds = self._repo.ui, self._repo.dirstate
1598 1603 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1599 1604 rejected = []
1600 1605 lstat = self._repo.wvfs.lstat
1601 1606 for f in list:
1602 1607 # ds.pathto() returns an absolute file when this is invoked from
1603 1608 # the keyword extension. That gets flagged as non-portable on
1604 1609 # Windows, since it contains the drive letter and colon.
1605 1610 scmutil.checkportable(ui, os.path.join(prefix, f))
1606 1611 try:
1607 1612 st = lstat(f)
1608 1613 except OSError:
1609 1614 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1610 1615 rejected.append(f)
1611 1616 continue
1612 1617 limit = ui.configbytes(b'ui', b'large-file-limit')
1613 1618 if limit != 0 and st.st_size > limit:
1614 1619 ui.warn(
1615 1620 _(
1616 1621 b"%s: up to %d MB of RAM may be required "
1617 1622 b"to manage this file\n"
1618 1623 b"(use 'hg revert %s' to cancel the "
1619 1624 b"pending addition)\n"
1620 1625 )
1621 1626 % (f, 3 * st.st_size // 1000000, uipath(f))
1622 1627 )
1623 1628 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1624 1629 ui.warn(
1625 1630 _(
1626 1631 b"%s not added: only files and symlinks "
1627 1632 b"supported currently\n"
1628 1633 )
1629 1634 % uipath(f)
1630 1635 )
1631 1636 rejected.append(f)
1632 1637 elif ds[f] in b'amn':
1633 1638 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1634 1639 elif ds[f] == b'r':
1635 1640 ds.normallookup(f)
1636 1641 else:
1637 1642 ds.add(f)
1638 1643 return rejected
1639 1644
1640 1645 def forget(self, files, prefix=b""):
1641 1646 with self._repo.wlock():
1642 1647 ds = self._repo.dirstate
1643 1648 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1644 1649 rejected = []
1645 1650 for f in files:
1646 1651 if f not in ds:
1647 1652 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1648 1653 rejected.append(f)
1649 1654 elif ds[f] != b'a':
1650 1655 ds.remove(f)
1651 1656 else:
1652 1657 ds.drop(f)
1653 1658 return rejected
1654 1659
1655 1660 def copy(self, source, dest):
1656 1661 try:
1657 1662 st = self._repo.wvfs.lstat(dest)
1658 1663 except OSError as err:
1659 1664 if err.errno != errno.ENOENT:
1660 1665 raise
1661 1666 self._repo.ui.warn(
1662 1667 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1663 1668 )
1664 1669 return
1665 1670 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1666 1671 self._repo.ui.warn(
1667 1672 _(b"copy failed: %s is not a file or a symbolic link\n")
1668 1673 % self._repo.dirstate.pathto(dest)
1669 1674 )
1670 1675 else:
1671 1676 with self._repo.wlock():
1672 1677 ds = self._repo.dirstate
1673 1678 if ds[dest] in b'?':
1674 1679 ds.add(dest)
1675 1680 elif ds[dest] in b'r':
1676 1681 ds.normallookup(dest)
1677 1682 ds.copy(source, dest)
1678 1683
1679 1684 def match(
1680 1685 self,
1681 1686 pats=None,
1682 1687 include=None,
1683 1688 exclude=None,
1684 1689 default=b'glob',
1685 1690 listsubrepos=False,
1686 1691 badfn=None,
1687 1692 ):
1688 1693 r = self._repo
1689 1694
1690 1695 # Only a case insensitive filesystem needs magic to translate user input
1691 1696 # to actual case in the filesystem.
1692 1697 icasefs = not util.fscasesensitive(r.root)
1693 1698 return matchmod.match(
1694 1699 r.root,
1695 1700 r.getcwd(),
1696 1701 pats,
1697 1702 include,
1698 1703 exclude,
1699 1704 default,
1700 1705 auditor=r.auditor,
1701 1706 ctx=self,
1702 1707 listsubrepos=listsubrepos,
1703 1708 badfn=badfn,
1704 1709 icasefs=icasefs,
1705 1710 )
1706 1711
1707 1712 def _filtersuspectsymlink(self, files):
1708 1713 if not files or self._repo.dirstate._checklink:
1709 1714 return files
1710 1715
1711 1716 # Symlink placeholders may get non-symlink-like contents
1712 1717 # via user error or dereferencing by NFS or Samba servers,
1713 1718 # so we filter out any placeholders that don't look like a
1714 1719 # symlink
1715 1720 sane = []
1716 1721 for f in files:
1717 1722 if self.flags(f) == b'l':
1718 1723 d = self[f].data()
1719 1724 if (
1720 1725 d == b''
1721 1726 or len(d) >= 1024
1722 1727 or b'\n' in d
1723 1728 or stringutil.binary(d)
1724 1729 ):
1725 1730 self._repo.ui.debug(
1726 1731 b'ignoring suspect symlink placeholder "%s"\n' % f
1727 1732 )
1728 1733 continue
1729 1734 sane.append(f)
1730 1735 return sane
1731 1736
1732 1737 def _checklookup(self, files):
1733 1738 # check for any possibly clean files
1734 1739 if not files:
1735 1740 return [], [], []
1736 1741
1737 1742 modified = []
1738 1743 deleted = []
1739 1744 fixup = []
1740 1745 pctx = self._parents[0]
1741 1746 # do a full compare of any files that might have changed
1742 1747 for f in sorted(files):
1743 1748 try:
1744 1749 # This will return True for a file that got replaced by a
1745 1750 # directory in the interim, but fixing that is pretty hard.
1746 1751 if (
1747 1752 f not in pctx
1748 1753 or self.flags(f) != pctx.flags(f)
1749 1754 or pctx[f].cmp(self[f])
1750 1755 ):
1751 1756 modified.append(f)
1752 1757 else:
1753 1758 fixup.append(f)
1754 1759 except (IOError, OSError):
1755 1760 # A file become inaccessible in between? Mark it as deleted,
1756 1761 # matching dirstate behavior (issue5584).
1757 1762 # The dirstate has more complex behavior around whether a
1758 1763 # missing file matches a directory, etc, but we don't need to
1759 1764 # bother with that: if f has made it to this point, we're sure
1760 1765 # it's in the dirstate.
1761 1766 deleted.append(f)
1762 1767
1763 1768 return modified, deleted, fixup
1764 1769
1765 1770 def _poststatusfixup(self, status, fixup):
1766 1771 """update dirstate for files that are actually clean"""
1767 1772 poststatus = self._repo.postdsstatus()
1768 1773 if fixup or poststatus:
1769 1774 try:
1770 1775 oldid = self._repo.dirstate.identity()
1771 1776
1772 1777 # updating the dirstate is optional
1773 1778 # so we don't wait on the lock
1774 1779 # wlock can invalidate the dirstate, so cache normal _after_
1775 1780 # taking the lock
1776 1781 with self._repo.wlock(False):
1777 1782 if self._repo.dirstate.identity() == oldid:
1778 1783 if fixup:
1779 1784 normal = self._repo.dirstate.normal
1780 1785 for f in fixup:
1781 1786 normal(f)
1782 1787 # write changes out explicitly, because nesting
1783 1788 # wlock at runtime may prevent 'wlock.release()'
1784 1789 # after this block from doing so for subsequent
1785 1790 # changing files
1786 1791 tr = self._repo.currenttransaction()
1787 1792 self._repo.dirstate.write(tr)
1788 1793
1789 1794 if poststatus:
1790 1795 for ps in poststatus:
1791 1796 ps(self, status)
1792 1797 else:
1793 1798 # in this case, writing changes out breaks
1794 1799 # consistency, because .hg/dirstate was
1795 1800 # already changed simultaneously after last
1796 1801 # caching (see also issue5584 for detail)
1797 1802 self._repo.ui.debug(
1798 1803 b'skip updating dirstate: identity mismatch\n'
1799 1804 )
1800 1805 except error.LockError:
1801 1806 pass
1802 1807 finally:
1803 1808 # Even if the wlock couldn't be grabbed, clear out the list.
1804 1809 self._repo.clearpostdsstatus()
1805 1810
1806 1811 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1807 1812 '''Gets the status from the dirstate -- internal use only.'''
1808 1813 subrepos = []
1809 1814 if b'.hgsub' in self:
1810 1815 subrepos = sorted(self.substate)
1811 1816 cmp, s = self._repo.dirstate.status(
1812 1817 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1813 1818 )
1814 1819
1815 1820 # check for any possibly clean files
1816 1821 fixup = []
1817 1822 if cmp:
1818 1823 modified2, deleted2, fixup = self._checklookup(cmp)
1819 1824 s.modified.extend(modified2)
1820 1825 s.deleted.extend(deleted2)
1821 1826
1822 1827 if fixup and clean:
1823 1828 s.clean.extend(fixup)
1824 1829
1825 1830 self._poststatusfixup(s, fixup)
1826 1831
1827 1832 if match.always():
1828 1833 # cache for performance
1829 1834 if s.unknown or s.ignored or s.clean:
1830 1835 # "_status" is cached with list*=False in the normal route
1831 1836 self._status = scmutil.status(
1832 1837 s.modified, s.added, s.removed, s.deleted, [], [], []
1833 1838 )
1834 1839 else:
1835 1840 self._status = s
1836 1841
1837 1842 return s
1838 1843
1839 1844 @propertycache
1840 1845 def _copies(self):
1841 1846 p1copies = {}
1842 1847 p2copies = {}
1843 1848 parents = self._repo.dirstate.parents()
1844 1849 p1manifest = self._repo[parents[0]].manifest()
1845 1850 p2manifest = self._repo[parents[1]].manifest()
1846 1851 changedset = set(self.added()) | set(self.modified())
1847 1852 narrowmatch = self._repo.narrowmatch()
1848 1853 for dst, src in self._repo.dirstate.copies().items():
1849 1854 if dst not in changedset or not narrowmatch(dst):
1850 1855 continue
1851 1856 if src in p1manifest:
1852 1857 p1copies[dst] = src
1853 1858 elif src in p2manifest:
1854 1859 p2copies[dst] = src
1855 1860 return p1copies, p2copies
1856 1861
1857 1862 @propertycache
1858 1863 def _manifest(self):
1859 1864 """generate a manifest corresponding to the values in self._status
1860 1865
1861 1866 This reuse the file nodeid from parent, but we use special node
1862 1867 identifiers for added and modified files. This is used by manifests
1863 1868 merge to see that files are different and by update logic to avoid
1864 1869 deleting newly added files.
1865 1870 """
1866 1871 return self._buildstatusmanifest(self._status)
1867 1872
1868 1873 def _buildstatusmanifest(self, status):
1869 1874 """Builds a manifest that includes the given status results."""
1870 1875 parents = self.parents()
1871 1876
1872 1877 man = parents[0].manifest().copy()
1873 1878
1874 1879 ff = self._flagfunc
1875 1880 for i, l in (
1876 1881 (addednodeid, status.added),
1877 1882 (modifiednodeid, status.modified),
1878 1883 ):
1879 1884 for f in l:
1880 1885 man[f] = i
1881 1886 try:
1882 1887 man.setflag(f, ff(f))
1883 1888 except OSError:
1884 1889 pass
1885 1890
1886 1891 for f in status.deleted + status.removed:
1887 1892 if f in man:
1888 1893 del man[f]
1889 1894
1890 1895 return man
1891 1896
1892 1897 def _buildstatus(
1893 1898 self, other, s, match, listignored, listclean, listunknown
1894 1899 ):
1895 1900 """build a status with respect to another context
1896 1901
1897 1902 This includes logic for maintaining the fast path of status when
1898 1903 comparing the working directory against its parent, which is to skip
1899 1904 building a new manifest if self (working directory) is not comparing
1900 1905 against its parent (repo['.']).
1901 1906 """
1902 1907 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1903 1908 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1904 1909 # might have accidentally ended up with the entire contents of the file
1905 1910 # they are supposed to be linking to.
1906 1911 s.modified[:] = self._filtersuspectsymlink(s.modified)
1907 1912 if other != self._repo[b'.']:
1908 1913 s = super(workingctx, self)._buildstatus(
1909 1914 other, s, match, listignored, listclean, listunknown
1910 1915 )
1911 1916 return s
1912 1917
1913 1918 def _matchstatus(self, other, match):
1914 1919 """override the match method with a filter for directory patterns
1915 1920
1916 1921 We use inheritance to customize the match.bad method only in cases of
1917 1922 workingctx since it belongs only to the working directory when
1918 1923 comparing against the parent changeset.
1919 1924
1920 1925 If we aren't comparing against the working directory's parent, then we
1921 1926 just use the default match object sent to us.
1922 1927 """
1923 1928 if other != self._repo[b'.']:
1924 1929
1925 1930 def bad(f, msg):
1926 1931 # 'f' may be a directory pattern from 'match.files()',
1927 1932 # so 'f not in ctx1' is not enough
1928 1933 if f not in other and not other.hasdir(f):
1929 1934 self._repo.ui.warn(
1930 1935 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1931 1936 )
1932 1937
1933 1938 match.bad = bad
1934 1939 return match
1935 1940
1936 1941 def walk(self, match):
1937 1942 '''Generates matching file names.'''
1938 1943 return sorted(
1939 1944 self._repo.dirstate.walk(
1940 1945 self._repo.narrowmatch(match),
1941 1946 subrepos=sorted(self.substate),
1942 1947 unknown=True,
1943 1948 ignored=False,
1944 1949 )
1945 1950 )
1946 1951
1947 1952 def matches(self, match):
1948 1953 match = self._repo.narrowmatch(match)
1949 1954 ds = self._repo.dirstate
1950 1955 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1951 1956
    def markcommitted(self, node):
        """Update the dirstate after this working context was committed as
        ``node``.

        All dirstate mutations happen inside a single ``parentchange``
        scope; files are normalized/dropped before the parents are moved.
        """
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                # committed files are now clean relative to the new parent
                self._repo.dirstate.normal(f)
            for f in self.removed():
                # committed removals no longer need to be tracked
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
1966 1971
1967 1972
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # no changeset id exists yet for an uncommitted file
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if this file has a copy
        source, else None.

        The filenode is looked up in the first parent's manifest;
        ``nullid`` is used when the source is absent from it.
        """
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            # filenode recorded in ctx's manifest, or nullid when absent
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # first parent is the copy source; its filelog is not known
            # here, so None is appended in its place
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents where the file does not exist (nullid filenode)
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != nullid
        ]

    def children(self):
        # an uncommitted revision cannot have children yet
        return []
2025 2030
2026 2031
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working-directory context
        return workingctx(self._repo)

    def data(self):
        """Read this file's current content from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the copy source recorded in the dirstate, if any."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        """On-disk size in bytes (lstat, so symlinks are not followed)."""
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return (mtime, tzoffset) for the on-disk file.

        Falls back to the changectx's timestamp when the file is missing
        (ENOENT); any other lstat error propagates.
        """
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # path audit (e.g. reject paths escaping the repo) via the vfs
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            # also clear any file occupying one of our ancestor directories
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # set the link (l) / exec (x) flags on the on-disk file
        self._repo.wvfs.setflags(self._path, l, x)
2119 2124
2120 2125
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        """Set (or reset) the context this overlay is layered on top of."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def data(self, path):
        """Return ``path``'s data, preferring the overlay cache and
        falling back to the wrapped context (also when only flags were
        cached)."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        """Manifest of the first parent with the overlay's adds,
        modifications and removals applied on top."""
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # flags come straight from the overlay cache; only meaningful for
        # dirty paths
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        """All paths touched by this overlay, sorted."""
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # dirty, still exists, and present in the parent -> modified
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # dirty, exists, but absent from the parent -> added
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # dirty, deleted in the overlay, but present in the parent
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def _overlaidcopies(self, copies):
        """Apply this overlay's copy records on top of ``copies``.

        ``copies`` is a mutable {dst: src} dict, updated in place and
        returned: entries for overlay-touched files are dropped, then
        re-added when the cached entry records a copy source.  Files
        outside the narrowspec are skipped.
        """
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p1copies(self):
        """{dst: src} copies relative to p1.

        Bug fix: the wrapped context is an attribute of this ctx (set in
        ``setbase``), not of the repo -- ``self._repo._wrappedctx``
        raised AttributeError.
        """
        return self._overlaidcopies(self._wrappedctx.p1copies().copy())

    def p2copies(self):
        """{dst: src} copies relative to p2 (same _wrappedctx fix as
        p1copies)."""
        return self._overlaidcopies(self._wrappedctx.p2copies().copy())

    def isinmemory(self):
        return True

    def filedate(self, path):
        """Cached date when dirty, else the wrapped context's date."""
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record ``path`` as copied from ``origin``, preserving the
        current date and flags."""
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        """Copy source recorded for ``path``, or None."""
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        """Flags for ``path``, preferring the overlay cache."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                # report the requested ``path``; this changeset-level
                # class has no ``self._path`` (that's a filectx concept),
                # so the old code raised AttributeError instead
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        # overlay cache takes precedence over the underlying p1 manifest
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        """Cache a write of ``data`` (with ``flags``) to ``path``."""
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        """Cache a flags change; symlink ('l') wins over exec ('x')."""
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        """Cache a deletion of ``path``."""
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        """Size in bytes of ``path``'s (possibly cached) data."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                # report the requested ``path`` (the old ``self._path``
                # attribute does not exist on this class)
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def isdirty(self, path):
        """True when ``path`` has an entry in the overlay cache."""
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        """Discard all cached overlay state."""
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [self.p1().rev()],
            scmutil.matchfiles(self.repo(), self._cache.keys()),
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )
2514 2519
2515 2520
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """Return True when this file's data differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def changectx(self):
        """Return the owning overlay context."""
        return self._parent

    def data(self):
        """File content, read from the parent overlay's cache."""
        return self._parent.data(self._path)

    def date(self):
        """File date, delegated to the parent overlay."""
        return self._parent.filedate(self._path)

    def exists(self):
        # identical to lexists() for this in-memory class
        return self._parent.exists(self._path)

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        """Copy source recorded for this path, or None."""
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        """Record this path as copied from ``origin``."""
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # no filesystem access happens in-memory, so nothing to audit
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # backgroundclose is meaningless for an in-memory write
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # nothing on disk can conflict with an in-memory file
        pass
2570 2575
2571 2576
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # everything in the manifest not touched by this commit
            cleanfiles = [
                f for f in self._manifest if f not in self._changedset
            ]
        else:
            cleanfiles = []
        modified = [f for f in self._status.modified if match(f)]
        added = [f for f in self._status.added if match(f)]
        removed = [f for f in self._status.removed if match(f)]
        return scmutil.status(
            modified, added, removed, [], [], [], cleanfiles
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        status = self._status
        return set(status.modified) | set(status.added) | set(status.removed)
2615 2620
2616 2621
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        # each path is computed at most once; repo/memctx are not part of
        # the key (see above)
        try:
            return memo[path]
        except KeyError:
            memo[path] = func(repo, memctx, path)
            return memo[path]

    return getfilectx
2632 2637
2633 2638
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        # mirror the source file's data, flags and copy source into an
        # in-memory file context
        fctx = ctx[path]
        src = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=src,
        )

    return getfilectx
2655 2660
2656 2661
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        filedata, mode, copysource = patchstore.getfile(path)
        if filedata is None:
            # the patch removes this file
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            filedata,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2679 2684
2680 2685
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=False,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        # rev/node are unknown until the context is actually committed
        self._rev = None
        self._node = None
        # missing parents (None) are replaced by the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # normalize the various accepted forms of filectxfn to a callable
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not in any parent manifest: newly added
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: modified
                modified.append(f)
            else:
                # filectxfn returned None: removed (see class docstring)
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2807 2812
2808 2813
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # a single flag byte is kept; symlink takes precedence over exec
        self._flags = b'l' if islink else b'x' if isexec else b''
        self._copysource = copysource

    def copysource(self):
        """Return the recorded copy source path, or None."""
        return self._copysource

    def cmp(self, fctx):
        """Return True when this file's data differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2859 2864
2860 2865
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=False,
    ):
        # default the description to the one of the reused changeset
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        # normalize ``parents`` to a fresh two-element list of changectxs,
        # padding missing slots with the null revision
        if parents is None:
            pctxs = list(originalctx.parents())
        else:
            pctxs = [repo[p] for p in parents if p is not None]
        while len(pctxs) < 2:
            pctxs.append(repo[nullid])
        self._parents = pctxs
        p1, p2 = pctxs

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # node of the reused manifest
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # delegate file access to the changeset whose manifest we reuse
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()

            def managing(f):
                return f in man1 or f in man2

        else:

            def managing(f):
                return f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2969 2974
2970 2975
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        has_link = b'l' in self.flags() or b'l' in fctx.flags()
        if not has_link and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            same = filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
            return not same
        return self.data() != fctx.data()

    def path(self):
        # path on disk this context wraps
        return self._path

    def flags(self):
        # arbitrary files carry no manifest flags
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        # raw bytes, bypassing any filter/decode logic
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
@@ -1,154 +1,153 b''
1 1 ===================================
2 2 Test repository filtering avoidance
3 3 ===================================
4 4
5 5 This test file is a bit special: it does not check features, but performance-related internal code paths.
6 6
7 7 Right now, filtering a repository comes with a cost that might be significant.
8 8 Until this gets better, there are various operations that try hard not to trigger
9 9 a filtering computation. This test file makes sure we don't reintroduce code that triggers the filtering for these operations:
10 10
11 11 Setup
12 12 -----
13 13 $ hg init test-repo
14 14 $ cd test-repo
15 15 $ echo "some line" > z
16 16 $ echo a > a
17 17 $ hg commit -Am a
18 18 adding a
19 19 adding z
20 20 $ echo "in a" >> z
21 21 $ echo b > b
22 22 $ hg commit -Am b
23 23 adding b
24 24 $ echo "file" >> z
25 25 $ echo c > c
26 26 $ hg commit -Am c
27 27 adding c
28 28 $ hg rm a
29 29 $ echo c1 > c
30 30 $ hg add c
31 31 c already tracked!
32 32 $ echo d > d
33 33 $ hg add d
34 34 $ rm b
35 35
36 36 $ cat << EOF >> $HGRCPATH
37 37 > [devel]
38 38 > debug.repo-filters = yes
39 39 > [ui]
40 40 > debug = yes
41 41 > EOF
42 42
43 43
44 44 tests
45 45 -----
46 46
47 47 Getting the node of `null`
48 48
49 49 $ hg log -r null -T "{node}\n"
50 50 0000000000000000000000000000000000000000
51 51
52 52 Getting basic changeset information about `null`
53 53
54 54 $ hg log -r null -T "{node}\n{date}\n"
55 55 0000000000000000000000000000000000000000
56 56 0.00
57 57
58 58 Getting status of null
59 59
60 60 $ hg status --change null
61 debug.filters: computing revision filter for "visible"
62 61
63 62 Getting status of working copy
64 63
65 64 $ hg status
66 65 debug.filters: computing revision filter for "visible"
67 66 M c
68 67 A d
69 68 R a
70 69 ! b
71 70
72 71 Getting data about the working copy parent
73 72
74 73 $ hg log -r '.' -T "{node}\n{date}\n"
75 74 debug.filters: computing revision filter for "visible"
76 75 c2932ca7786be30b67154d541a8764fae5532261
77 76 0.00
78 77
79 78 Getting working copy diff
80 79
81 80 $ hg diff
82 81 debug.filters: computing revision filter for "visible"
83 82 diff -r c2932ca7786be30b67154d541a8764fae5532261 a
84 83 --- a/a Thu Jan 01 00:00:00 1970 +0000
85 84 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
86 85 @@ -1,1 +0,0 @@
87 86 -a
88 87 diff -r c2932ca7786be30b67154d541a8764fae5532261 c
89 88 --- a/c Thu Jan 01 00:00:00 1970 +0000
90 89 +++ b/c Thu Jan 01 00:00:00 1970 +0000
91 90 @@ -1,1 +1,1 @@
92 91 -c
93 92 +c1
94 93 diff -r c2932ca7786be30b67154d541a8764fae5532261 d
95 94 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
96 95 +++ b/d Thu Jan 01 00:00:00 1970 +0000
97 96 @@ -0,0 +1,1 @@
98 97 +d
99 98 $ hg diff --change .
100 99 debug.filters: computing revision filter for "visible"
101 100 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 c
102 101 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
103 102 +++ b/c Thu Jan 01 00:00:00 1970 +0000
104 103 @@ -0,0 +1,1 @@
105 104 +c
106 105 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 z
107 106 --- a/z Thu Jan 01 00:00:00 1970 +0000
108 107 +++ b/z Thu Jan 01 00:00:00 1970 +0000
109 108 @@ -1,2 +1,3 @@
110 109 some line
111 110 in a
112 111 +file
113 112
114 113 exporting the current changeset
115 114
116 115 $ hg export
117 116 debug.filters: computing revision filter for "visible"
118 117 exporting patch:
119 118 # HG changeset patch
120 119 # User test
121 120 # Date 0 0
122 121 # Thu Jan 01 00:00:00 1970 +0000
123 122 # Node ID c2932ca7786be30b67154d541a8764fae5532261
124 123 # Parent 05293e5dd8d1ae4f84a8520a11c6f97cad26deca
125 124 c
126 125
127 126 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 c
128 127 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
129 128 +++ b/c Thu Jan 01 00:00:00 1970 +0000
130 129 @@ -0,0 +1,1 @@
131 130 +c
132 131 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 z
133 132 --- a/z Thu Jan 01 00:00:00 1970 +0000
134 133 +++ b/z Thu Jan 01 00:00:00 1970 +0000
135 134 @@ -1,2 +1,3 @@
136 135 some line
137 136 in a
138 137 +file
139 138
140 139 using annotate
141 140
142 141 - file with a single change
143 142
144 143 $ hg annotate a
145 144 debug.filters: computing revision filter for "visible"
146 145 0: a
147 146
148 147 - file with multiple change
149 148
150 149 $ hg annotate z
151 150 debug.filters: computing revision filter for "visible"
152 151 0: some line
153 152 1: in a
154 153 2: file
General Comments 0
You need to be logged in to leave comments. Login now