##// END OF EJS Templates
##// Scraped review-viewer header for the change shown below:
##//   changeset: r48906:3fe500d1 (default branch), author: marmoute
##//   summary:   dirstate-item: use `tracked` instead of the `state` in context's iter...
##//   diff hunk: @@ -1,3123 +1,3123 b'' (whole file shown, unchanged line counts)
##// "parent child Browse files / Show More" are viewer navigation links, not content.
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullrev,
19 19 short,
20 20 )
21 21 from .pycompat import (
22 22 getattr,
23 23 open,
24 24 )
25 25 from . import (
26 26 dagop,
27 27 encoding,
28 28 error,
29 29 fileset,
30 30 match as matchmod,
31 31 mergestate as mergestatemod,
32 32 metadata,
33 33 obsolete as obsmod,
34 34 patch,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 scmutil,
40 40 sparse,
41 41 subrepo,
42 42 subrepoutil,
43 43 util,
44 44 )
45 45 from .utils import (
46 46 dateutil,
47 47 stringutil,
48 48 )
49 49
50 50 propertycache = util.propertycache
51 51
52 52
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Subclasses provide `_rev` lazily; an object of another type (or one
        # whose `_rev` is not yet resolvable) simply compares unequal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership means "path tracked in this context's manifest"
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # mapping of subrepo path -> state, parsed once per context
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        return phases.phasenames[self.phase()]

    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # second parent, or the null context when this is not a merge
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        # Resolve (filenode, flags) for `path`, preferring whatever data is
        # already cached on this object (full manifest, then manifest delta)
        # before falling back to a manifestlog lookup.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node or b'None', path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node or b'None', path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # missing files have empty flags rather than raising
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        """return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        """
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        ctx1.status(ctx2) returns the status of change from ctx1 to ctx2

        Returns a mercurial.scmutils.status object.

        Data can be accessed using either tuple notation:

        (modified, added, removed, deleted, unknown, ignored, clean)

        or direct attribute access:

        s.modified, s.added, ...
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        #
        # NOTE(review): the local name `reversed` shadows the builtin within
        # this method; it is only used as a flag here.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

    def mergestate(self, clean=False):
        """Get a mergestate object for this context."""
        raise NotImplementedError(
            '%s does not implement mergestate()' % self.__class__
        )

    def isempty(self):
        # A commit is "empty" when it is a non-merge, stays on its parent's
        # branch, does not close the branch, and touches no files.
        return not (
            len(self.parents()) > 1
            or self.branch() != self.p1().branch()
            or self.closesbranch()
            or self.files()
        )
496 496
497 497
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # only the null revision is falsy
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        if self._maybe_filtered:
            repo = self._repo
        else:
            # already known unfiltered: skip the filtering overhead
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        if self._maybe_filtered:
            cl = repo.changelog
        else:
            cl = repo.unfiltered().changelog

        # parents resolved through the changelog are known unfiltered
        p1, p2 = cl.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
        return [
            changectx(repo, p1, cl.node(p1), maybe_filtered=False),
            changectx(repo, p2, cl.node(p2), maybe_filtered=False),
        ]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        # touched files that were neither added nor removed
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    # NOTE(review): filesadded() and filesremoved() mirror each other; keep
    # their source-selection logic in sync when changing either one.
    def filesadded(self):
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = metadata.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded

    def filesremoved(self):
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = metadata.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = self._repo.nodeconstants.nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
788 788
789 789
790 790 class basefilectx(object):
791 791 """A filecontext object represents the common logic for its children:
792 792 filectx: read-only access to a filerevision that is already present
793 793 in the repo,
794 794 workingfilectx: a filecontext that represents files from the working
795 795 directory,
796 796 memfilectx: a filecontext that represents files in-memory,
797 797 """
798 798
    @propertycache
    def _filelog(self):
        # revlog holding this file's history, resolved lazily from the repo
        return self._repo.file(self._path)
802 802
    @propertycache
    def _changeid(self):
        # changelog revision this file context belongs to; the cheapest
        # reliable source wins: explicit changectx, then a descendant-adjusted
        # linkrev, then the raw (possibly aliased) linkrev.
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)
813 813
814 814 @propertycache
815 815 def _filenode(self):
816 816 if '_fileid' in self.__dict__:
817 817 return self._filelog.lookup(self._fileid)
818 818 else:
819 819 return self._changectx.filenode(self._path)
820 820
    @propertycache
    def _filerev(self):
        # revision number of _filenode within the file's own revlog
        return self._filelog.rev(self._filenode)
824 824
    @propertycache
    def _repopath(self):
        # path relative to the repository root (identical to _path here)
        return self._path
828 828
829 829 def __nonzero__(self):
830 830 try:
831 831 self._filenode
832 832 return True
833 833 except error.LookupError:
834 834 # file is missing
835 835 return False
836 836
837 837 __bool__ = __nonzero__
838 838
    def __bytes__(self):
        # b"path@changeset", or b"path@???" when the changeset lookup fails
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)
846 846
    def __repr__(self):
        # debugging aid, e.g. "<filectx path@node>"
        return "<%s %s>" % (type(self).__name__, str(self))
849 849
850 850 def __hash__(self):
851 851 try:
852 852 return hash((self._path, self._filenode))
853 853 except AttributeError:
854 854 return id(self)
855 855
856 856 def __eq__(self, other):
857 857 try:
858 858 return (
859 859 type(self) == type(other)
860 860 and self._path == other._path
861 861 and self._filenode == other._filenode
862 862 )
863 863 except AttributeError:
864 864 return False
865 865
    def __ne__(self, other):
        # keep != consistent with == (needed on Python 2 / explicit here)
        return not (self == other)
868 868
869 869 def filerev(self):
870 870 return self._filerev
871 871
872 872 def filenode(self):
873 873 return self._filenode
874 874
875 875 @propertycache
876 876 def _flags(self):
877 877 return self._changectx.flags(self._path)
878 878
879 879 def flags(self):
880 880 return self._flags
881 881
882 882 def filelog(self):
883 883 return self._filelog
884 884
885 885 def rev(self):
886 886 return self._changeid
887 887
888 888 def linkrev(self):
889 889 return self._filelog.linkrev(self._filerev)
890 890
891 891 def node(self):
892 892 return self._changectx.node()
893 893
894 894 def hex(self):
895 895 return self._changectx.hex()
896 896
897 897 def user(self):
898 898 return self._changectx.user()
899 899
900 900 def date(self):
901 901 return self._changectx.date()
902 902
903 903 def files(self):
904 904 return self._changectx.files()
905 905
906 906 def description(self):
907 907 return self._changectx.description()
908 908
909 909 def branch(self):
910 910 return self._changectx.branch()
911 911
912 912 def extra(self):
913 913 return self._changectx.extra()
914 914
915 915 def phase(self):
916 916 return self._changectx.phase()
917 917
918 918 def phasestr(self):
919 919 return self._changectx.phasestr()
920 920
921 921 def obsolete(self):
922 922 return self._changectx.obsolete()
923 923
924 924 def instabilities(self):
925 925 return self._changectx.instabilities()
926 926
927 927 def manifest(self):
928 928 return self._changectx.manifest()
929 929
930 930 def changectx(self):
931 931 return self._changectx
932 932
933 933 def renamed(self):
934 934 return self._copied
935 935
936 936 def copysource(self):
937 937 return self._copied and self._copied[0]
938 938
939 939 def repo(self):
940 940 return self._repo
941 941
942 942 def size(self):
943 943 return len(self.data())
944 944
945 945 def path(self):
946 946 return self._path
947 947
948 948 def isbinary(self):
949 949 try:
950 950 return stringutil.binary(self.data())
951 951 except IOError:
952 952 return False
953 953
954 954 def isexec(self):
955 955 return b'x' in self.flags()
956 956
957 957 def islink(self):
958 958 return b'l' in self.flags()
959 959
960 960 def isabsent(self):
961 961 """whether this filectx represents a file not in self._changectx
962 962
963 963 This is mainly for merge code to detect change/delete conflicts. This is
964 964 expected to be True for all subclasses of basectx."""
965 965 return False
966 966
967 967 _customcmp = False
968 968
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # Let a context with custom comparison semantics take over.
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        # fctx has no stored filenode (e.g. a working-directory file):
        # decide by size heuristics, only reading content when necessary.
        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size() or self.flags() == b'l':
                # size() matches: need to compare content
                # issue6456: Always compare symlinks because size can represent
                # encrypted string for EXT-4 encryption(fscrypt).
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
999 999
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        # work on the unfiltered changelog: linkrevs may point at filtered revs
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the linkrev is srcrev itself, nothing to adjust
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
1052 1052
1053 1053 def isintroducedafter(self, changelogrev):
1054 1054 """True if a filectx has been introduced after a given floor revision"""
1055 1055 if self.linkrev() >= changelogrev:
1056 1056 return True
1057 1057 introrev = self._introrev(stoprev=changelogrev)
1058 1058 if introrev is None:
1059 1059 return False
1060 1060 return introrev >= changelogrev
1061 1061
1062 1062 def introrev(self):
1063 1063 """return the rev of the changeset which introduced this file revision
1064 1064
1065 1065 This method is different from linkrev because it take into account the
1066 1066 changeset the filectx was created from. It ensures the returned
1067 1067 revision is one of its ancestors. This prevents bugs from
1068 1068 'linkrev-shadowing' when a file revision is used by multiple
1069 1069 changesets.
1070 1070 """
1071 1071 return self._introrev()
1072 1072
1073 1073 def _introrev(self, stoprev=None):
1074 1074 """
1075 1075 Same as `introrev` but, with an extra argument to limit changelog
1076 1076 iteration range in some internal usecase.
1077 1077
1078 1078 If `stoprev` is set, the `introrev` will not be searched past that
1079 1079 `stoprev` revision and "None" might be returned. This is useful to
1080 1080 limit the iteration range.
1081 1081 """
1082 1082 toprev = None
1083 1083 attrs = vars(self)
1084 1084 if '_changeid' in attrs:
1085 1085 # We have a cached value already
1086 1086 toprev = self._changeid
1087 1087 elif '_changectx' in attrs:
1088 1088 # We know which changelog entry we are coming from
1089 1089 toprev = self._changectx.rev()
1090 1090
1091 1091 if toprev is not None:
1092 1092 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1093 1093 elif '_descendantrev' in attrs:
1094 1094 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1095 1095 # be nice and cache the result of the computation
1096 1096 if introrev is not None:
1097 1097 self._changeid = introrev
1098 1098 return introrev
1099 1099 else:
1100 1100 return self.linkrev()
1101 1101
1102 1102 def introfilectx(self):
1103 1103 """Return filectx having identical contents, but pointing to the
1104 1104 changeset revision where this filectx was introduced"""
1105 1105 introrev = self.introrev()
1106 1106 if self.rev() == introrev:
1107 1107 return self
1108 1108 return self.filectx(self.filenode(), changeid=introrev)
1109 1109
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()

        ``path``/``fileid``/``filelog`` identify the parent file revision;
        the returned filectx carries over enough ancestry information
        (``_descendantrev`` and ``_ancestrycontext``) for later linkrev
        adjustment to stay within the right part of history.
        """
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
1125 1125
1126 1126 def parents(self):
1127 1127 _path = self._path
1128 1128 fl = self._filelog
1129 1129 parents = self._filelog.parents(self._filenode)
1130 1130 pl = [
1131 1131 (_path, node, fl)
1132 1132 for node in parents
1133 1133 if node != self._repo.nodeconstants.nullid
1134 1134 ]
1135 1135
1136 1136 r = fl.renamed(self._filenode)
1137 1137 if r:
1138 1138 # - In the simple rename case, both parent are nullid, pl is empty.
1139 1139 # - In case of merge, only one of the parent is null id and should
1140 1140 # be replaced with the rename information. This parent is -always-
1141 1141 # the first one.
1142 1142 #
1143 1143 # As null id have always been filtered out in the previous list
1144 1144 # comprehension, inserting to 0 will always result in "replacing
1145 1145 # first nullid parent with rename information.
1146 1146 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1147 1147
1148 1148 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1149 1149
1150 1150 def p1(self):
1151 1151 return self.parents()[0]
1152 1152
1153 1153 def p2(self):
1154 1154 p = self.parents()
1155 1155 if len(p) == 2:
1156 1156 return p[1]
1157 1157 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1158 1158
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)

        :follow: if true, rename sources are followed across copies
        :skiprevs: revisions whose changes should be skipped over
        :diffopts: diff options used when comparing revisions
        """
        # memoize filelog lookups for the duration of the annotation
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            # (the bare attribute access is intentional; presumably it forces
            # the cached '_changeid' to be computed — confirm against
            # basefilectx's propertycache definitions)
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )
1208 1208
1209 1209 def ancestors(self, followfirst=False):
1210 1210 visit = {}
1211 1211 c = self
1212 1212 if followfirst:
1213 1213 cut = 1
1214 1214 else:
1215 1215 cut = None
1216 1216
1217 1217 while True:
1218 1218 for parent in c.parents()[:cut]:
1219 1219 visit[(parent.linkrev(), parent.filenode())] = parent
1220 1220 if not visit:
1221 1221 break
1222 1222 c = visit.pop(max(visit))
1223 1223 yield c
1224 1224
1225 1225 def decodeddata(self):
1226 1226 """Returns `data()` after running repository decoding filters.
1227 1227
1228 1228 This is often equivalent to how the data would be expressed on disk.
1229 1229 """
1230 1230 return self._repo.wwritedata(self.path(), self.data())
1231 1231
1232 1232
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node.

        At least one of changeid, fileid or changectx must be provided so
        the file revision can be pinned down; filelog may be passed to
        avoid reopening the filelog.
        """
        self._repo = repo
        self._path = path

        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        if filelog is not None:
            self._filelog = filelog

        # only seed the caches we were actually given; anything missing is
        # computed lazily via propertycache attributes
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        # revision data exactly as stored in the revlog
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file revision content.

        Censored nodes raise Abort unless the b'censor.policy' config is
        b'ignore', in which case empty content is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        # size as recorded by the filelog for this file revision
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            # this changeset introduced the file revision: report the copy
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # a parent holds the very same file revision: no copy
                    # to report here
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1357 1357
1358 1358
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        """
        :repo: the repository this context belongs to
        :text: the commit message
        :user: username string, or None to use the ui's username lazily
        :date: any valid date string or (unixtime, offset), or None
        :extra: a dictionary of extra values, or None
        :changes: a status-like set of file lists, or None to compute the
                  repository status lazily
        :branch: branch name stored in extra; falls back to the existing
                 b'branch' entry in extra, then to b'default'
        """
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    def hex(self):
        # An uncommitted context is identified by the 'wdir' pseudo-node.
        # The 'return' was missing here, which made this method always
        # return None instead of the hex string.
        return self._repo.nodeconstants.wdirhex

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # computed lazily when 'changes' was not provided to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # honor the devel knob that pins commit dates for testing
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """sorted list of modified, added and removed files"""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context carries the bookmarks of all its parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # never lower than the configured new-commit phase, nor lower than
        # any parent's phase
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags of ``path``, or b'' when it cannot be found."""
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # yield the parents themselves first, then walk the changelog
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1526 1526
1527 1527
1528 1528 class workingctx(committablectx):
1529 1529 """A workingctx object makes access to data related to
1530 1530 the current working directory convenient.
1531 1531 date - any valid date string or (unixtime, offset), or None.
1532 1532 user - username string, or None.
1533 1533 extra - a dictionary of extra values, or None.
1534 1534 changes - a list of file lists as returned by localrepo.status()
1535 1535 or None to use the repository status.
1536 1536 """
1537 1537
1538 1538 def __init__(
1539 1539 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1540 1540 ):
1541 1541 branch = None
1542 1542 if not extra or b'branch' not in extra:
1543 1543 try:
1544 1544 branch = repo.dirstate.branch()
1545 1545 except UnicodeDecodeError:
1546 1546 raise error.Abort(_(b'branch name not in UTF-8!'))
1547 1547 super(workingctx, self).__init__(
1548 1548 repo, text, user, date, extra, changes, branch=branch
1549 1549 )
1550 1550
1551 1551 def __iter__(self):
1552 1552 d = self._repo.dirstate
1553 1553 for f in d:
1554 if d[f] != b'r':
1554 if d.get_entry(f).tracked:
1555 1555 yield f
1556 1556
1557 1557 def __contains__(self, key):
1558 1558 return self._repo.dirstate.get_entry(key).tracked
1559 1559
1560 1560 def hex(self):
1561 1561 return self._repo.nodeconstants.wdirhex
1562 1562
    @propertycache
    def _parents(self):
        # working directory parents as recorded by the dirstate; a null
        # second parent is dropped, so a non-merge state yields one entry
        p = self._repo.dirstate.parents()
        if p[1] == self._repo.nodeconstants.nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [
            changectx(
                self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
            )
            for n in p
        ]
1576 1576
    def setparents(self, p1node, p2node=None):
        """Set the working directory parents to the given nodes.

        ``p2node`` defaults to the null id. Dirstate copy records are
        adjusted so that only entries that still make sense relative to the
        new first parent are kept.
        """
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == self._repo.nodeconstants.nullid:
                # drop copy records that reference neither side of the new
                # (single) parent
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)
1595 1595
1596 1596 def _fileinfo(self, path):
1597 1597 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1598 1598 self._manifest
1599 1599 return super(workingctx, self)._fileinfo(path)
1600 1600
    def _buildflagfunc(self):
        """Build a fallback flags(path) callable reconstructing exec/symlink
        flags from the parent manifests (used when the filesystem does not
        support them)."""
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                # follow a recorded copy back to its source's flags
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    # both sides agree
                    return fl1
                if fl1 == fla:
                    # only p2's side changed the flag: keep that change
                    return fl2
                if fl2 == fla:
                    # only p1's side changed the flag: keep that change
                    return fl1
                return b''  # punt for conflicts

        return func
1634 1634
    @propertycache
    def _flagfunc(self):
        # the dirstate reads flags itself when it can; _buildflagfunc is
        # the fallback used when the filesystem doesn't support them
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1638 1638
1639 1639 def flags(self, path):
1640 1640 try:
1641 1641 return self._flagfunc(path)
1642 1642 except OSError:
1643 1643 return b''
1644 1644
1645 1645 def filectx(self, path, filelog=None):
1646 1646 """get a file context from the working directory"""
1647 1647 return workingfilectx(
1648 1648 self._repo, path, workingctx=self, filelog=filelog
1649 1649 )
1650 1650
1651 1651 def dirty(self, missing=False, merge=True, branch=True):
1652 1652 """check whether a working directory is modified"""
1653 1653 # check subrepos first
1654 1654 for s in sorted(self.substate):
1655 1655 if self.sub(s).dirty(missing=missing):
1656 1656 return True
1657 1657 # check current working dir
1658 1658 return (
1659 1659 (merge and self.p2())
1660 1660 or (branch and self.branch() != self.p1().branch())
1661 1661 or self.modified()
1662 1662 or self.added()
1663 1663 or self.removed()
1664 1664 or (missing and self.deleted())
1665 1665 )
1666 1666
1667 1667 def add(self, list, prefix=b""):
1668 1668 with self._repo.wlock():
1669 1669 ui, ds = self._repo.ui, self._repo.dirstate
1670 1670 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1671 1671 rejected = []
1672 1672 lstat = self._repo.wvfs.lstat
1673 1673 for f in list:
1674 1674 # ds.pathto() returns an absolute file when this is invoked from
1675 1675 # the keyword extension. That gets flagged as non-portable on
1676 1676 # Windows, since it contains the drive letter and colon.
1677 1677 scmutil.checkportable(ui, os.path.join(prefix, f))
1678 1678 try:
1679 1679 st = lstat(f)
1680 1680 except OSError:
1681 1681 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1682 1682 rejected.append(f)
1683 1683 continue
1684 1684 limit = ui.configbytes(b'ui', b'large-file-limit')
1685 1685 if limit != 0 and st.st_size > limit:
1686 1686 ui.warn(
1687 1687 _(
1688 1688 b"%s: up to %d MB of RAM may be required "
1689 1689 b"to manage this file\n"
1690 1690 b"(use 'hg revert %s' to cancel the "
1691 1691 b"pending addition)\n"
1692 1692 )
1693 1693 % (f, 3 * st.st_size // 1000000, uipath(f))
1694 1694 )
1695 1695 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1696 1696 ui.warn(
1697 1697 _(
1698 1698 b"%s not added: only files and symlinks "
1699 1699 b"supported currently\n"
1700 1700 )
1701 1701 % uipath(f)
1702 1702 )
1703 1703 rejected.append(f)
1704 1704 elif not ds.set_tracked(f):
1705 1705 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1706 1706 return rejected
1707 1707
1708 1708 def forget(self, files, prefix=b""):
1709 1709 with self._repo.wlock():
1710 1710 ds = self._repo.dirstate
1711 1711 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1712 1712 rejected = []
1713 1713 for f in files:
1714 1714 if not ds.set_untracked(f):
1715 1715 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1716 1716 rejected.append(f)
1717 1717 return rejected
1718 1718
1719 1719 def copy(self, source, dest):
1720 1720 try:
1721 1721 st = self._repo.wvfs.lstat(dest)
1722 1722 except OSError as err:
1723 1723 if err.errno != errno.ENOENT:
1724 1724 raise
1725 1725 self._repo.ui.warn(
1726 1726 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1727 1727 )
1728 1728 return
1729 1729 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1730 1730 self._repo.ui.warn(
1731 1731 _(b"copy failed: %s is not a file or a symbolic link\n")
1732 1732 % self._repo.dirstate.pathto(dest)
1733 1733 )
1734 1734 else:
1735 1735 with self._repo.wlock():
1736 1736 ds = self._repo.dirstate
1737 1737 ds.set_tracked(dest)
1738 1738 ds.copy(source, dest)
1739 1739
1740 1740 def match(
1741 1741 self,
1742 1742 pats=None,
1743 1743 include=None,
1744 1744 exclude=None,
1745 1745 default=b'glob',
1746 1746 listsubrepos=False,
1747 1747 badfn=None,
1748 1748 cwd=None,
1749 1749 ):
1750 1750 r = self._repo
1751 1751 if not cwd:
1752 1752 cwd = r.getcwd()
1753 1753
1754 1754 # Only a case insensitive filesystem needs magic to translate user input
1755 1755 # to actual case in the filesystem.
1756 1756 icasefs = not util.fscasesensitive(r.root)
1757 1757 return matchmod.match(
1758 1758 r.root,
1759 1759 cwd,
1760 1760 pats,
1761 1761 include,
1762 1762 exclude,
1763 1763 default,
1764 1764 auditor=r.auditor,
1765 1765 ctx=self,
1766 1766 listsubrepos=listsubrepos,
1767 1767 badfn=badfn,
1768 1768 icasefs=icasefs,
1769 1769 )
1770 1770
1771 1771 def _filtersuspectsymlink(self, files):
1772 1772 if not files or self._repo.dirstate._checklink:
1773 1773 return files
1774 1774
1775 1775 # Symlink placeholders may get non-symlink-like contents
1776 1776 # via user error or dereferencing by NFS or Samba servers,
1777 1777 # so we filter out any placeholders that don't look like a
1778 1778 # symlink
1779 1779 sane = []
1780 1780 for f in files:
1781 1781 if self.flags(f) == b'l':
1782 1782 d = self[f].data()
1783 1783 if (
1784 1784 d == b''
1785 1785 or len(d) >= 1024
1786 1786 or b'\n' in d
1787 1787 or stringutil.binary(d)
1788 1788 ):
1789 1789 self._repo.ui.debug(
1790 1790 b'ignoring suspect symlink placeholder "%s"\n' % f
1791 1791 )
1792 1792 continue
1793 1793 sane.append(f)
1794 1794 return sane
1795 1795
1796 1796 def _checklookup(self, files):
1797 1797 # check for any possibly clean files
1798 1798 if not files:
1799 1799 return [], [], []
1800 1800
1801 1801 modified = []
1802 1802 deleted = []
1803 1803 fixup = []
1804 1804 pctx = self._parents[0]
1805 1805 # do a full compare of any files that might have changed
1806 1806 for f in sorted(files):
1807 1807 try:
1808 1808 # This will return True for a file that got replaced by a
1809 1809 # directory in the interim, but fixing that is pretty hard.
1810 1810 if (
1811 1811 f not in pctx
1812 1812 or self.flags(f) != pctx.flags(f)
1813 1813 or pctx[f].cmp(self[f])
1814 1814 ):
1815 1815 modified.append(f)
1816 1816 else:
1817 1817 fixup.append(f)
1818 1818 except (IOError, OSError):
1819 1819 # A file become inaccessible in between? Mark it as deleted,
1820 1820 # matching dirstate behavior (issue5584).
1821 1821 # The dirstate has more complex behavior around whether a
1822 1822 # missing file matches a directory, etc, but we don't need to
1823 1823 # bother with that: if f has made it to this point, we're sure
1824 1824 # it's in the dirstate.
1825 1825 deleted.append(f)
1826 1826
1827 1827 return modified, deleted, fixup
1828 1828
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        ``fixup`` lists files the status run proved clean; they are marked
        as such in the dirstate, but only when the dirstate was not changed
        concurrently (detected through its identity).
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus or self._repo.dirstate._dirty:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    dirstate = self._repo.dirstate
                    if dirstate.identity() == oldid:
                        if fixup:
                            if dirstate.pendingparentchange():
                                normal = lambda f: dirstate.update_file(
                                    f, p1_tracked=True, wc_tracked=True
                                )
                            else:
                                normal = dirstate.set_clean
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1875 1875
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # 'cmp' holds files the dirstate could not settle on its own and
        # that need a content comparison
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        # opportunistically mark the proven-clean files in the dirstate
        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1908 1908
1909 1909 @propertycache
1910 1910 def _copies(self):
1911 1911 p1copies = {}
1912 1912 p2copies = {}
1913 1913 parents = self._repo.dirstate.parents()
1914 1914 p1manifest = self._repo[parents[0]].manifest()
1915 1915 p2manifest = self._repo[parents[1]].manifest()
1916 1916 changedset = set(self.added()) | set(self.modified())
1917 1917 narrowmatch = self._repo.narrowmatch()
1918 1918 for dst, src in self._repo.dirstate.copies().items():
1919 1919 if dst not in changedset or not narrowmatch(dst):
1920 1920 continue
1921 1921 if src in p1manifest:
1922 1922 p1copies[dst] = src
1923 1923 elif src in p2manifest:
1924 1924 p2copies[dst] = src
1925 1925 return p1copies, p2copies
1926 1926
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1937 1937
1938 1938 def _buildstatusmanifest(self, status):
1939 1939 """Builds a manifest that includes the given status results."""
1940 1940 parents = self.parents()
1941 1941
1942 1942 man = parents[0].manifest().copy()
1943 1943
1944 1944 ff = self._flagfunc
1945 1945 for i, l in (
1946 1946 (self._repo.nodeconstants.addednodeid, status.added),
1947 1947 (self._repo.nodeconstants.modifiednodeid, status.modified),
1948 1948 ):
1949 1949 for f in l:
1950 1950 man[f] = i
1951 1951 try:
1952 1952 man.setflag(f, ff(f))
1953 1953 except OSError:
1954 1954 pass
1955 1955
1956 1956 for f in status.deleted + status.removed:
1957 1957 if f in man:
1958 1958 del man[f]
1959 1959
1960 1960 return man
1961 1961
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # the incoming 's' is intentionally discarded: the working directory
        # status is always recomputed from the dirstate first
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # not comparing against the wdir parent: fall back to the generic
            # manifest-based comparison, seeded with the dirstate status
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
1982 1982
1983 1983 def _matchstatus(self, other, match):
1984 1984 """override the match method with a filter for directory patterns
1985 1985
1986 1986 We use inheritance to customize the match.bad method only in cases of
1987 1987 workingctx since it belongs only to the working directory when
1988 1988 comparing against the parent changeset.
1989 1989
1990 1990 If we aren't comparing against the working directory's parent, then we
1991 1991 just use the default match object sent to us.
1992 1992 """
1993 1993 if other != self._repo[b'.']:
1994 1994
1995 1995 def bad(f, msg):
1996 1996 # 'f' may be a directory pattern from 'match.files()',
1997 1997 # so 'f not in ctx1' is not enough
1998 1998 if f not in other and not other.hasdir(f):
1999 1999 self._repo.ui.warn(
2000 2000 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
2001 2001 )
2002 2002
2003 2003 match.bad = bad
2004 2004 return match
2005 2005
2006 2006 def walk(self, match):
2007 2007 '''Generates matching file names.'''
2008 2008 return sorted(
2009 2009 self._repo.dirstate.walk(
2010 2010 self._repo.narrowmatch(match),
2011 2011 subrepos=sorted(self.substate),
2012 2012 unknown=True,
2013 2013 ignored=False,
2014 2014 )
2015 2015 )
2016 2016
2017 2017 def matches(self, match):
2018 2018 match = self._repo.narrowmatch(match)
2019 2019 ds = self._repo.dirstate
2020 2020 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2021 2021
    def markcommitted(self, node):
        """Update the dirstate to reflect that this context was committed.

        Touched files become clean entries tracked in the new parent,
        removed files are dropped, and ``node`` becomes the new dirstate
        parent.
        """
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.update_file(
                    f, p1_tracked=True, wc_tracked=True
                )
            for f in self.removed():
                self._repo.dirstate.update_file(
                    f, p1_tracked=False, wc_tracked=False
                )
            self._repo.dirstate.setparents(node)
            self._repo._quick_access_changeid_invalidate()

        sparse.aftercommit(self._repo, node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
2041 2041
2042 2042 def mergestate(self, clean=False):
2043 2043 if clean:
2044 2044 return mergestatemod.mergestate.clean(self._repo)
2045 2045 return mergestatemod.mergestate.read(self._repo)
2046 2046
2047 2047
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        # filelog and ctx are optional; when omitted, subclasses are expected
        # to provide them lazily (e.g. via propertycache)
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if this file is a copy.

        Returns None when the file has no recorded copy source.  The
        filenode is looked up in the first parent's manifest and falls back
        to nullid when the source is not there.
        """
        path = self.copysource()
        if not path:
            return None
        return (
            path,
            self._changectx._parents[0]._manifest.get(
                path, self._repo.nodeconstants.nullid
            ),
        )

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            # nullid when the path is absent from that parent's manifest
            return ctx._manifest.get(path, self._repo.nodeconstants.nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # the first parent filectx comes from the copy source
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents in which the file does not exist (nullid entries)
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != self._repo.nodeconstants.nullid
        ]

    def children(self):
        # an uncommitted file has no descendants yet
        return []
2110 2110
2111 2111
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # default to a fresh workingctx when none was supplied at creation
        return workingctx(self._repo)

    def data(self):
        """Return the on-disk content of this file."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the dirstate-recorded copy source, if any."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        """Return the on-disk size in bytes (without following symlinks)."""
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return (mtime, tzoffset) of the on-disk file.

        Falls back to the changectx's date when the file is missing from
        disk; the timezone always comes from the changectx.
        """
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        # follows symlinks (contrast with lexists)
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        """Check that the path is legal inside the working directory."""
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            # a parent path being a file/symlink would block the write;
            # remove the first one found, walking from the deepest dir up
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # l: symlink flag, x: executable flag
        self._repo.wvfs.setflags(self._path, l, x)
2204 2204
2205 2205
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # start with an empty cache and no mergestate
        self.clean()

    def setbase(self, wrappedctx):
        """Set the context this overlay records changes against."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=None):
        # p1 must remain the wrapped base; only p2 can be chosen freely
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        """Return ``path``'s content, from the cache or the wrapped context."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # p1's manifest with sentinel nodeids for files touched in the cache
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = self._repo.nodeconstants.addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = self._repo.nodeconstants.modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # flags only come from the cache; callers must only ask about
        # paths that are present there
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        """Return all paths touched by this overlay, sorted."""
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # cached, existing, and also present in the wrapped parent
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # cached, existing, but absent from the wrapped parent
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # cached as deleted while still present in the wrapped parent
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        """Return {dst: src} copy records against the first parent."""
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        # NOTE(review): intentionally(?) identical to p1copies -- the cache
        # records a single copy source per path; confirm whether p2
        # attribution should ever differ here.
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        # distinguishes this overlay from an on-disk working context
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record ``path`` as copied from ``origin`` in the cache."""
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        """Return the recorded copy source of ``path``, or None."""
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        # cache entries override the base context, including deletions
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        """Record new content (and flags) for ``path`` in the cache."""
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        """Record symlink/exec flags for ``path`` in the cache."""
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        """Record ``path`` as deleted in the cache."""
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
        if len(parents) == 1:
            parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        """Build a memctx that amends ``precursor`` with this overlay."""
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        """True when ``path`` has a cache entry (i.e. was touched here)."""
        return path in self._cache

    def clean(self):
        """Discard all cached changes and any in-memory mergestate."""
        self._mergestate = None
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [
                (
                    self.p1().rev(),
                    scmutil.matchfiles(self.repo(), self._cache.keys()),
                )
            ],
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }
        # the manifest is derived from the cache, so it is now stale
        util.clearcachedproperty(self, b'_manifest')

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )

    def mergestate(self, clean=False):
        # lazily create (or reset) the purely in-memory mergestate
        if clean or self._mergestate is None:
            self._mergestate = mergestatemod.memmergestate(self._repo)
        return self._mergestate
2623 2623
2624 2624
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        # the owning overlayworkingctx; all operations delegate to it
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True when content differs from fctx
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # overlay paths are never on-disk symlinks, so exists == lexists
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # nothing to audit: writes never touch the filesystem
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # backgroundclose is accepted for API compatibility but not forwarded
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no on-disk state, so there is nothing to clear
        pass
2679 2679
2680 2680
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.

        Note that the ``ignored`` and ``unknown`` arguments are accepted
        for signature compatibility but not honored: those lists are
        always empty here.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2723 2723
2724 2724
def makecachingfilectxfn(func):
    """Wrap ``func`` into a filectxfn that memoizes its result per path.

    We can't use util.cachefunc because it keys the cache on every argument,
    and the repo/memctx arguments would create a reference cycle.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        try:
            return memo[path]
        except KeyError:
            result = memo[path] = func(repo, memctx, path)
            return result

    return getfilectx
2740 2740
2741 2741
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=fctx.copysource(),
        )

    return getfilectx
2763 2763
2764 2764
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # the patch deletes this file; memctx encodes that as None
            return None
        link_flag, exec_flag = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=link_flag,
            isexec=exec_flag,
            copysource=copysource,
        )

    return getfilectx
2787 2787
2788 2788
2789 2789 class memctx(committablectx):
2790 2790 """Use memctx to perform in-memory commits via localrepo.commitctx().
2791 2791
2792 2792 Revision information is supplied at initialization time while
2793 2793 related files data and is made available through a callback
2794 2794 mechanism. 'repo' is the current localrepo, 'parents' is a
2795 2795 sequence of two parent revisions identifiers (pass None for every
2796 2796 missing parent), 'text' is the commit message and 'files' lists
2797 2797 names of files touched by the revision (normalized and relative to
2798 2798 repository root).
2799 2799
2800 2800 filectxfn(repo, memctx, path) is a callable receiving the
2801 2801 repository, the current memctx object and the normalized path of
2802 2802 requested file, relative to repository root. It is fired by the
2803 2803 commit function for every file in 'files', but calls order is
2804 2804 undefined. If the file is available in the revision being
2805 2805 committed (updated or added), filectxfn returns a memfilectx
2806 2806 object. If the file was removed, filectxfn return None for recent
2807 2807 Mercurial. Moved files are represented by marking the source file
2808 2808 removed and the new file added with copy information (see
2809 2809 memfilectx).
2810 2810
2811 2811 user receives the committer name and defaults to current
2812 2812 repository username, date is the commit date in any format
2813 2813 supported by dateutil.parsedate() and defaults to current date, extra
2814 2814 is a dictionary of metadata or is left empty.
2815 2815 """
2816 2816
2817 2817 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2818 2818 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2819 2819 # this field to determine what to do in filectxfn.
2820 2820 _returnnoneformissingfiles = True
2821 2821
    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        # not committed yet, so no revision number or node
        self._rev = None
        self._node = None
        # missing parents (None) are normalized to nullid
        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # accept a patch.filestore or any non-callable context-like store
        # and normalize both into a filectxfn callable
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)
2859 2859
2860 2860 def filectx(self, path, filelog=None):
2861 2861 """get a file context from the working directory
2862 2862
2863 2863 Returns None if file doesn't exist and should be removed."""
2864 2864 return self._filectxfn(self._repo, self, path)
2865 2865
2866 2866 def commit(self):
2867 2867 """commit context to the repo"""
2868 2868 return self._repo.commitctx(self)
2869 2869
2870 2870 @propertycache
2871 2871 def _manifest(self):
2872 2872 """generate a manifest based on the return values of filectxfn"""
2873 2873
2874 2874 # keep this simple for now; just worry about p1
2875 2875 pctx = self._parents[0]
2876 2876 man = pctx.manifest().copy()
2877 2877
2878 2878 for f in self._status.modified:
2879 2879 man[f] = self._repo.nodeconstants.modifiednodeid
2880 2880
2881 2881 for f in self._status.added:
2882 2882 man[f] = self._repo.nodeconstants.addednodeid
2883 2883
2884 2884 for f in self._status.removed:
2885 2885 if f in man:
2886 2886 del man[f]
2887 2887
2888 2888 return man
2889 2889
2890 2890 @propertycache
2891 2891 def _status(self):
2892 2892 """Calculate exact status from ``files`` specified at construction"""
2893 2893 man1 = self.p1().manifest()
2894 2894 p2 = self._parents[1]
2895 2895 # "1 < len(self._parents)" can't be used for checking
2896 2896 # existence of the 2nd parent, because "memctx._parents" is
2897 2897 # explicitly initialized by the list, of which length is 2.
2898 2898 if p2.rev() != nullrev:
2899 2899 man2 = p2.manifest()
2900 2900 managing = lambda f: f in man1 or f in man2
2901 2901 else:
2902 2902 managing = lambda f: f in man1
2903 2903
2904 2904 modified, added, removed = [], [], []
2905 2905 for f in self._files:
2906 2906 if not managing(f):
2907 2907 added.append(f)
2908 2908 elif self[f]:
2909 2909 modified.append(f)
2910 2910 else:
2911 2911 removed.append(f)
2912 2912
2913 2913 return scmutil.status(modified, added, removed, [], [], [], [])
2914 2914
2915 2915 def parents(self):
2916 2916 if self._parents[1].rev() == nullrev:
2917 2917 return [self._parents[0]]
2918 2918 return self._parents
2919 2919
2920 2920
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was copied
        in the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # the symlink flag takes precedence over the executable flag
        self._flags = b'l' if islink else (b'x' if isexec else b'')
        self._copysource = copysource

    def copysource(self):
        """Return the recorded copy source path, or None."""
        return self._copysource

    def cmp(self, fctx):
        """True if this file's content differs from ``fctx``'s."""
        return fctx.data() != self.data()

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite: only replaces the cached content"""
        self._data = data
2972 2972
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'originalctx' is the original revision whose manifest
    we're reusing, 'parents' is a sequence of two parent revisions
    identifiers (pass None for every missing parent), 'text' is the commit
    message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # pad with null revisions so there are always exactly two parents
        while len(parents) < 2:
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # The null-parent guards must compare revisions: the previous form
        # compared the context object against the nullid *bytes*
        # (``p1 != self._repo.nodeconstants.nullid``), which is always true
        # because basectx.__eq__ only matches other contexts, so the check
        # was never skipped for a null parent as intended.
        if p1.rev() != nullrev and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2.rev() != nullrev and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node of the reused manifest."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        """Delegate file contexts to the original revision."""
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3081 3081
3082 3082
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        involves_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        fast_path_ok = (
            not involves_symlink
            and isinstance(fctx, workingfilectx)
            and self._repo
        )
        if fast_path_ok:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            other = self._repo.wjoin(fctx.path())
            return not filecmp.cmp(self.path(), other)
        return self.data() != fctx.data()

    def path(self):
        """Return the on-disk path this context wraps."""
        return self._path

    def flags(self):
        """Arbitrary files carry no link/exec flags."""
        return b''

    def data(self):
        """Return the raw file content."""
        return util.readfile(self._path)

    def decodeddata(self):
        """Return the file content read directly from disk."""
        fh = open(self._path, b"rb")
        with fh as f:
            return f.read()

    def remove(self):
        """Delete the underlying file."""
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        """Replace the underlying file content; flags are unsupported."""
        assert not flags
        fh = open(self._path, b"wb")
        with fh as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now