# Provenance (scraped diff-view header): Mercurial revision r48506:5bbf3042 on
# branch 'default' by marmoute — "context: use `update_file` instead of
# `normal` in `markcommitted`". The content below reproduces
# mercurial/context.py (3119 → 3121 lines) with diff-view line numbers
# embedded in each line.
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullrev,
19 19 short,
20 20 )
21 21 from .pycompat import (
22 22 getattr,
23 23 open,
24 24 )
25 25 from . import (
26 26 dagop,
27 27 encoding,
28 28 error,
29 29 fileset,
30 30 match as matchmod,
31 31 mergestate as mergestatemod,
32 32 metadata,
33 33 obsolete as obsmod,
34 34 patch,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 scmutil,
40 40 sparse,
41 41 subrepo,
42 42 subrepoutil,
43 43 util,
44 44 )
45 45 from .utils import (
46 46 dateutil,
47 47 stringutil,
48 48 )
49 49
50 50 propertycache = util.propertycache
51 51
52 52
53 53 class basectx(object):
54 54 """A basectx object represents the common logic for its children:
55 55 changectx: read-only context that is already present in the repo,
56 56 workingctx: a context that represents the working directory and can
57 57 be committed,
58 58 memctx: a context that represents changes in-memory and can also
59 59 be committed."""
60 60
61 61 def __init__(self, repo):
62 62 self._repo = repo
63 63
64 64 def __bytes__(self):
65 65 return short(self.node())
66 66
67 67 __str__ = encoding.strmethod(__bytes__)
68 68
69 69 def __repr__(self):
70 70 return "<%s %s>" % (type(self).__name__, str(self))
71 71
72 72 def __eq__(self, other):
73 73 try:
74 74 return type(self) == type(other) and self._rev == other._rev
75 75 except AttributeError:
76 76 return False
77 77
78 78 def __ne__(self, other):
79 79 return not (self == other)
80 80
81 81 def __contains__(self, key):
82 82 return key in self._manifest
83 83
84 84 def __getitem__(self, key):
85 85 return self.filectx(key)
86 86
87 87 def __iter__(self):
88 88 return iter(self._manifest)
89 89
90 90 def _buildstatusmanifest(self, status):
91 91 """Builds a manifest that includes the given status results, if this is
92 92 a working copy context. For non-working copy contexts, it just returns
93 93 the normal manifest."""
94 94 return self.manifest()
95 95
96 96 def _matchstatus(self, other, match):
97 97 """This internal method provides a way for child objects to override the
98 98 match operator.
99 99 """
100 100 return match
101 101
102 102 def _buildstatus(
103 103 self, other, s, match, listignored, listclean, listunknown
104 104 ):
105 105 """build a status with respect to another context"""
106 106 # Load earliest manifest first for caching reasons. More specifically,
107 107 # if you have revisions 1000 and 1001, 1001 is probably stored as a
108 108 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
109 109 # 1000 and cache it so that when you read 1001, we just need to apply a
110 110 # delta to what's in the cache. So that's one full reconstruction + one
111 111 # delta application.
112 112 mf2 = None
113 113 if self.rev() is not None and self.rev() < other.rev():
114 114 mf2 = self._buildstatusmanifest(s)
115 115 mf1 = other._buildstatusmanifest(s)
116 116 if mf2 is None:
117 117 mf2 = self._buildstatusmanifest(s)
118 118
119 119 modified, added = [], []
120 120 removed = []
121 121 clean = []
122 122 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
123 123 deletedset = set(deleted)
124 124 d = mf1.diff(mf2, match=match, clean=listclean)
125 125 for fn, value in pycompat.iteritems(d):
126 126 if fn in deletedset:
127 127 continue
128 128 if value is None:
129 129 clean.append(fn)
130 130 continue
131 131 (node1, flag1), (node2, flag2) = value
132 132 if node1 is None:
133 133 added.append(fn)
134 134 elif node2 is None:
135 135 removed.append(fn)
136 136 elif flag1 != flag2:
137 137 modified.append(fn)
138 138 elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
139 139 # When comparing files between two commits, we save time by
140 140 # not comparing the file contents when the nodeids differ.
141 141 # Note that this means we incorrectly report a reverted change
142 142 # to a file as a modification.
143 143 modified.append(fn)
144 144 elif self[fn].cmp(other[fn]):
145 145 modified.append(fn)
146 146 else:
147 147 clean.append(fn)
148 148
149 149 if removed:
150 150 # need to filter files if they are already reported as removed
151 151 unknown = [
152 152 fn
153 153 for fn in unknown
154 154 if fn not in mf1 and (not match or match(fn))
155 155 ]
156 156 ignored = [
157 157 fn
158 158 for fn in ignored
159 159 if fn not in mf1 and (not match or match(fn))
160 160 ]
161 161 # if they're deleted, don't report them as removed
162 162 removed = [fn for fn in removed if fn not in deletedset]
163 163
164 164 return scmutil.status(
165 165 modified, added, removed, deleted, unknown, ignored, clean
166 166 )
167 167
168 168 @propertycache
169 169 def substate(self):
170 170 return subrepoutil.state(self, self._repo.ui)
171 171
172 172 def subrev(self, subpath):
173 173 return self.substate[subpath][1]
174 174
175 175 def rev(self):
176 176 return self._rev
177 177
178 178 def node(self):
179 179 return self._node
180 180
181 181 def hex(self):
182 182 return hex(self.node())
183 183
184 184 def manifest(self):
185 185 return self._manifest
186 186
187 187 def manifestctx(self):
188 188 return self._manifestctx
189 189
190 190 def repo(self):
191 191 return self._repo
192 192
193 193 def phasestr(self):
194 194 return phases.phasenames[self.phase()]
195 195
196 196 def mutable(self):
197 197 return self.phase() > phases.public
198 198
199 199 def matchfileset(self, cwd, expr, badfn=None):
200 200 return fileset.match(self, cwd, expr, badfn=badfn)
201 201
202 202 def obsolete(self):
203 203 """True if the changeset is obsolete"""
204 204 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
205 205
206 206 def extinct(self):
207 207 """True if the changeset is extinct"""
208 208 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
209 209
210 210 def orphan(self):
211 211 """True if the changeset is not obsolete, but its ancestor is"""
212 212 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
213 213
214 214 def phasedivergent(self):
215 215 """True if the changeset tries to be a successor of a public changeset
216 216
217 217 Only non-public and non-obsolete changesets may be phase-divergent.
218 218 """
219 219 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
220 220
221 221 def contentdivergent(self):
222 222 """Is a successor of a changeset with multiple possible successor sets
223 223
224 224 Only non-public and non-obsolete changesets may be content-divergent.
225 225 """
226 226 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
227 227
228 228 def isunstable(self):
229 229 """True if the changeset is either orphan, phase-divergent or
230 230 content-divergent"""
231 231 return self.orphan() or self.phasedivergent() or self.contentdivergent()
232 232
233 233 def instabilities(self):
234 234 """return the list of instabilities affecting this changeset.
235 235
236 236 Instabilities are returned as strings. possible values are:
237 237 - orphan,
238 238 - phase-divergent,
239 239 - content-divergent.
240 240 """
241 241 instabilities = []
242 242 if self.orphan():
243 243 instabilities.append(b'orphan')
244 244 if self.phasedivergent():
245 245 instabilities.append(b'phase-divergent')
246 246 if self.contentdivergent():
247 247 instabilities.append(b'content-divergent')
248 248 return instabilities
249 249
250 250 def parents(self):
251 251 """return contexts for each parent changeset"""
252 252 return self._parents
253 253
254 254 def p1(self):
255 255 return self._parents[0]
256 256
257 257 def p2(self):
258 258 parents = self._parents
259 259 if len(parents) == 2:
260 260 return parents[1]
261 261 return self._repo[nullrev]
262 262
263 263 def _fileinfo(self, path):
264 264 if '_manifest' in self.__dict__:
265 265 try:
266 266 return self._manifest.find(path)
267 267 except KeyError:
268 268 raise error.ManifestLookupError(
269 269 self._node or b'None', path, _(b'not found in manifest')
270 270 )
271 271 if '_manifestdelta' in self.__dict__ or path in self.files():
272 272 if path in self._manifestdelta:
273 273 return (
274 274 self._manifestdelta[path],
275 275 self._manifestdelta.flags(path),
276 276 )
277 277 mfl = self._repo.manifestlog
278 278 try:
279 279 node, flag = mfl[self._changeset.manifest].find(path)
280 280 except KeyError:
281 281 raise error.ManifestLookupError(
282 282 self._node or b'None', path, _(b'not found in manifest')
283 283 )
284 284
285 285 return node, flag
286 286
287 287 def filenode(self, path):
288 288 return self._fileinfo(path)[0]
289 289
290 290 def flags(self, path):
291 291 try:
292 292 return self._fileinfo(path)[1]
293 293 except error.LookupError:
294 294 return b''
295 295
296 296 @propertycache
297 297 def _copies(self):
298 298 return metadata.computechangesetcopies(self)
299 299
300 300 def p1copies(self):
301 301 return self._copies[0]
302 302
303 303 def p2copies(self):
304 304 return self._copies[1]
305 305
306 306 def sub(self, path, allowcreate=True):
307 307 '''return a subrepo for the stored revision of path, never wdir()'''
308 308 return subrepo.subrepo(self, path, allowcreate=allowcreate)
309 309
310 310 def nullsub(self, path, pctx):
311 311 return subrepo.nullsubrepo(self, path, pctx)
312 312
313 313 def workingsub(self, path):
314 314 """return a subrepo for the stored revision, or wdir if this is a wdir
315 315 context.
316 316 """
317 317 return subrepo.subrepo(self, path, allowwdir=True)
318 318
319 319 def match(
320 320 self,
321 321 pats=None,
322 322 include=None,
323 323 exclude=None,
324 324 default=b'glob',
325 325 listsubrepos=False,
326 326 badfn=None,
327 327 cwd=None,
328 328 ):
329 329 r = self._repo
330 330 if not cwd:
331 331 cwd = r.getcwd()
332 332 return matchmod.match(
333 333 r.root,
334 334 cwd,
335 335 pats,
336 336 include,
337 337 exclude,
338 338 default,
339 339 auditor=r.nofsauditor,
340 340 ctx=self,
341 341 listsubrepos=listsubrepos,
342 342 badfn=badfn,
343 343 )
344 344
345 345 def diff(
346 346 self,
347 347 ctx2=None,
348 348 match=None,
349 349 changes=None,
350 350 opts=None,
351 351 losedatafn=None,
352 352 pathfn=None,
353 353 copy=None,
354 354 copysourcematch=None,
355 355 hunksfilterfn=None,
356 356 ):
357 357 """Returns a diff generator for the given contexts and matcher"""
358 358 if ctx2 is None:
359 359 ctx2 = self.p1()
360 360 if ctx2 is not None:
361 361 ctx2 = self._repo[ctx2]
362 362 return patch.diff(
363 363 self._repo,
364 364 ctx2,
365 365 self,
366 366 match=match,
367 367 changes=changes,
368 368 opts=opts,
369 369 losedatafn=losedatafn,
370 370 pathfn=pathfn,
371 371 copy=copy,
372 372 copysourcematch=copysourcematch,
373 373 hunksfilterfn=hunksfilterfn,
374 374 )
375 375
376 376 def dirs(self):
377 377 return self._manifest.dirs()
378 378
379 379 def hasdir(self, dir):
380 380 return self._manifest.hasdir(dir)
381 381
382 382 def status(
383 383 self,
384 384 other=None,
385 385 match=None,
386 386 listignored=False,
387 387 listclean=False,
388 388 listunknown=False,
389 389 listsubrepos=False,
390 390 ):
391 391 """return status of files between two nodes or node and working
392 392 directory.
393 393
394 394 If other is None, compare this node with working directory.
395 395
396 396 ctx1.status(ctx2) returns the status of change from ctx1 to ctx2
397 397
398 398 Returns a mercurial.scmutils.status object.
399 399
400 400 Data can be accessed using either tuple notation:
401 401
402 402 (modified, added, removed, deleted, unknown, ignored, clean)
403 403
404 404 or direct attribute access:
405 405
406 406 s.modified, s.added, ...
407 407 """
408 408
409 409 ctx1 = self
410 410 ctx2 = self._repo[other]
411 411
412 412 # This next code block is, admittedly, fragile logic that tests for
413 413 # reversing the contexts and wouldn't need to exist if it weren't for
414 414 # the fast (and common) code path of comparing the working directory
415 415 # with its first parent.
416 416 #
417 417 # What we're aiming for here is the ability to call:
418 418 #
419 419 # workingctx.status(parentctx)
420 420 #
421 421 # If we always built the manifest for each context and compared those,
422 422 # then we'd be done. But the special case of the above call means we
423 423 # just copy the manifest of the parent.
424 424 reversed = False
425 425 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
426 426 reversed = True
427 427 ctx1, ctx2 = ctx2, ctx1
428 428
429 429 match = self._repo.narrowmatch(match)
430 430 match = ctx2._matchstatus(ctx1, match)
431 431 r = scmutil.status([], [], [], [], [], [], [])
432 432 r = ctx2._buildstatus(
433 433 ctx1, r, match, listignored, listclean, listunknown
434 434 )
435 435
436 436 if reversed:
437 437 # Reverse added and removed. Clear deleted, unknown and ignored as
438 438 # these make no sense to reverse.
439 439 r = scmutil.status(
440 440 r.modified, r.removed, r.added, [], [], [], r.clean
441 441 )
442 442
443 443 if listsubrepos:
444 444 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
445 445 try:
446 446 rev2 = ctx2.subrev(subpath)
447 447 except KeyError:
448 448 # A subrepo that existed in node1 was deleted between
449 449 # node1 and node2 (inclusive). Thus, ctx2's substate
450 450 # won't contain that subpath. The best we can do ignore it.
451 451 rev2 = None
452 452 submatch = matchmod.subdirmatcher(subpath, match)
453 453 s = sub.status(
454 454 rev2,
455 455 match=submatch,
456 456 ignored=listignored,
457 457 clean=listclean,
458 458 unknown=listunknown,
459 459 listsubrepos=True,
460 460 )
461 461 for k in (
462 462 'modified',
463 463 'added',
464 464 'removed',
465 465 'deleted',
466 466 'unknown',
467 467 'ignored',
468 468 'clean',
469 469 ):
470 470 rfiles, sfiles = getattr(r, k), getattr(s, k)
471 471 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
472 472
473 473 r.modified.sort()
474 474 r.added.sort()
475 475 r.removed.sort()
476 476 r.deleted.sort()
477 477 r.unknown.sort()
478 478 r.ignored.sort()
479 479 r.clean.sort()
480 480
481 481 return r
482 482
483 483 def mergestate(self, clean=False):
484 484 """Get a mergestate object for this context."""
485 485 raise NotImplementedError(
486 486 '%s does not implement mergestate()' % self.__class__
487 487 )
488 488
489 489 def isempty(self):
490 490 return not (
491 491 len(self.parents()) > 1
492 492 or self.branch() != self.p1().branch()
493 493 or self.closesbranch()
494 494 or self.files()
495 495 )
496 496
497 497
498 498 class changectx(basectx):
499 499 """A changecontext object makes access to data related to a particular
500 500 changeset convenient. It represents a read-only context already present in
501 501 the repo."""
502 502
503 503 def __init__(self, repo, rev, node, maybe_filtered=True):
504 504 super(changectx, self).__init__(repo)
505 505 self._rev = rev
506 506 self._node = node
507 507 # When maybe_filtered is True, the revision might be affected by
508 508 # changelog filtering and operation through the filtered changelog must be used.
509 509 #
510 510 # When maybe_filtered is False, the revision has already been checked
511 511 # against filtering and is not filtered. Operation through the
512 512 # unfiltered changelog might be used in some case.
513 513 self._maybe_filtered = maybe_filtered
514 514
515 515 def __hash__(self):
516 516 try:
517 517 return hash(self._rev)
518 518 except AttributeError:
519 519 return id(self)
520 520
521 521 def __nonzero__(self):
522 522 return self._rev != nullrev
523 523
524 524 __bool__ = __nonzero__
525 525
526 526 @propertycache
527 527 def _changeset(self):
528 528 if self._maybe_filtered:
529 529 repo = self._repo
530 530 else:
531 531 repo = self._repo.unfiltered()
532 532 return repo.changelog.changelogrevision(self.rev())
533 533
534 534 @propertycache
535 535 def _manifest(self):
536 536 return self._manifestctx.read()
537 537
538 538 @property
539 539 def _manifestctx(self):
540 540 return self._repo.manifestlog[self._changeset.manifest]
541 541
542 542 @propertycache
543 543 def _manifestdelta(self):
544 544 return self._manifestctx.readdelta()
545 545
546 546 @propertycache
547 547 def _parents(self):
548 548 repo = self._repo
549 549 if self._maybe_filtered:
550 550 cl = repo.changelog
551 551 else:
552 552 cl = repo.unfiltered().changelog
553 553
554 554 p1, p2 = cl.parentrevs(self._rev)
555 555 if p2 == nullrev:
556 556 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
557 557 return [
558 558 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
559 559 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
560 560 ]
561 561
562 562 def changeset(self):
563 563 c = self._changeset
564 564 return (
565 565 c.manifest,
566 566 c.user,
567 567 c.date,
568 568 c.files,
569 569 c.description,
570 570 c.extra,
571 571 )
572 572
573 573 def manifestnode(self):
574 574 return self._changeset.manifest
575 575
576 576 def user(self):
577 577 return self._changeset.user
578 578
579 579 def date(self):
580 580 return self._changeset.date
581 581
582 582 def files(self):
583 583 return self._changeset.files
584 584
585 585 def filesmodified(self):
586 586 modified = set(self.files())
587 587 modified.difference_update(self.filesadded())
588 588 modified.difference_update(self.filesremoved())
589 589 return sorted(modified)
590 590
591 591 def filesadded(self):
592 592 filesadded = self._changeset.filesadded
593 593 compute_on_none = True
594 594 if self._repo.filecopiesmode == b'changeset-sidedata':
595 595 compute_on_none = False
596 596 else:
597 597 source = self._repo.ui.config(b'experimental', b'copies.read-from')
598 598 if source == b'changeset-only':
599 599 compute_on_none = False
600 600 elif source != b'compatibility':
601 601 # filelog mode, ignore any changelog content
602 602 filesadded = None
603 603 if filesadded is None:
604 604 if compute_on_none:
605 605 filesadded = metadata.computechangesetfilesadded(self)
606 606 else:
607 607 filesadded = []
608 608 return filesadded
609 609
610 610 def filesremoved(self):
611 611 filesremoved = self._changeset.filesremoved
612 612 compute_on_none = True
613 613 if self._repo.filecopiesmode == b'changeset-sidedata':
614 614 compute_on_none = False
615 615 else:
616 616 source = self._repo.ui.config(b'experimental', b'copies.read-from')
617 617 if source == b'changeset-only':
618 618 compute_on_none = False
619 619 elif source != b'compatibility':
620 620 # filelog mode, ignore any changelog content
621 621 filesremoved = None
622 622 if filesremoved is None:
623 623 if compute_on_none:
624 624 filesremoved = metadata.computechangesetfilesremoved(self)
625 625 else:
626 626 filesremoved = []
627 627 return filesremoved
628 628
629 629 @propertycache
630 630 def _copies(self):
631 631 p1copies = self._changeset.p1copies
632 632 p2copies = self._changeset.p2copies
633 633 compute_on_none = True
634 634 if self._repo.filecopiesmode == b'changeset-sidedata':
635 635 compute_on_none = False
636 636 else:
637 637 source = self._repo.ui.config(b'experimental', b'copies.read-from')
638 638 # If config says to get copy metadata only from changeset, then
639 639 # return that, defaulting to {} if there was no copy metadata. In
640 640 # compatibility mode, we return copy data from the changeset if it
641 641 # was recorded there, and otherwise we fall back to getting it from
642 642 # the filelogs (below).
643 643 #
644 644 # If we are in compatiblity mode and there is not data in the
645 645 # changeset), we get the copy metadata from the filelogs.
646 646 #
647 647 # otherwise, when config said to read only from filelog, we get the
648 648 # copy metadata from the filelogs.
649 649 if source == b'changeset-only':
650 650 compute_on_none = False
651 651 elif source != b'compatibility':
652 652 # filelog mode, ignore any changelog content
653 653 p1copies = p2copies = None
654 654 if p1copies is None:
655 655 if compute_on_none:
656 656 p1copies, p2copies = super(changectx, self)._copies
657 657 else:
658 658 if p1copies is None:
659 659 p1copies = {}
660 660 if p2copies is None:
661 661 p2copies = {}
662 662 return p1copies, p2copies
663 663
664 664 def description(self):
665 665 return self._changeset.description
666 666
667 667 def branch(self):
668 668 return encoding.tolocal(self._changeset.extra.get(b"branch"))
669 669
670 670 def closesbranch(self):
671 671 return b'close' in self._changeset.extra
672 672
673 673 def extra(self):
674 674 """Return a dict of extra information."""
675 675 return self._changeset.extra
676 676
677 677 def tags(self):
678 678 """Return a list of byte tag names"""
679 679 return self._repo.nodetags(self._node)
680 680
681 681 def bookmarks(self):
682 682 """Return a list of byte bookmark names."""
683 683 return self._repo.nodebookmarks(self._node)
684 684
685 685 def phase(self):
686 686 return self._repo._phasecache.phase(self._repo, self._rev)
687 687
688 688 def hidden(self):
689 689 return self._rev in repoview.filterrevs(self._repo, b'visible')
690 690
691 691 def isinmemory(self):
692 692 return False
693 693
694 694 def children(self):
695 695 """return list of changectx contexts for each child changeset.
696 696
697 697 This returns only the immediate child changesets. Use descendants() to
698 698 recursively walk children.
699 699 """
700 700 c = self._repo.changelog.children(self._node)
701 701 return [self._repo[x] for x in c]
702 702
703 703 def ancestors(self):
704 704 for a in self._repo.changelog.ancestors([self._rev]):
705 705 yield self._repo[a]
706 706
707 707 def descendants(self):
708 708 """Recursively yield all children of the changeset.
709 709
710 710 For just the immediate children, use children()
711 711 """
712 712 for d in self._repo.changelog.descendants([self._rev]):
713 713 yield self._repo[d]
714 714
715 715 def filectx(self, path, fileid=None, filelog=None):
716 716 """get a file context from this changeset"""
717 717 if fileid is None:
718 718 fileid = self.filenode(path)
719 719 return filectx(
720 720 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
721 721 )
722 722
723 723 def ancestor(self, c2, warn=False):
724 724 """return the "best" ancestor context of self and c2
725 725
726 726 If there are multiple candidates, it will show a message and check
727 727 merge.preferancestor configuration before falling back to the
728 728 revlog ancestor."""
729 729 # deal with workingctxs
730 730 n2 = c2._node
731 731 if n2 is None:
732 732 n2 = c2._parents[0]._node
733 733 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
734 734 if not cahs:
735 735 anc = self._repo.nodeconstants.nullid
736 736 elif len(cahs) == 1:
737 737 anc = cahs[0]
738 738 else:
739 739 # experimental config: merge.preferancestor
740 740 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
741 741 try:
742 742 ctx = scmutil.revsymbol(self._repo, r)
743 743 except error.RepoLookupError:
744 744 continue
745 745 anc = ctx.node()
746 746 if anc in cahs:
747 747 break
748 748 else:
749 749 anc = self._repo.changelog.ancestor(self._node, n2)
750 750 if warn:
751 751 self._repo.ui.status(
752 752 (
753 753 _(b"note: using %s as ancestor of %s and %s\n")
754 754 % (short(anc), short(self._node), short(n2))
755 755 )
756 756 + b''.join(
757 757 _(
758 758 b" alternatively, use --config "
759 759 b"merge.preferancestor=%s\n"
760 760 )
761 761 % short(n)
762 762 for n in sorted(cahs)
763 763 if n != anc
764 764 )
765 765 )
766 766 return self._repo[anc]
767 767
768 768 def isancestorof(self, other):
769 769 """True if this changeset is an ancestor of other"""
770 770 return self._repo.changelog.isancestorrev(self._rev, other._rev)
771 771
772 772 def walk(self, match):
773 773 '''Generates matching file names.'''
774 774
775 775 # Wrap match.bad method to have message with nodeid
776 776 def bad(fn, msg):
777 777 # The manifest doesn't know about subrepos, so don't complain about
778 778 # paths into valid subrepos.
779 779 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
780 780 return
781 781 match.bad(fn, _(b'no such file in rev %s') % self)
782 782
783 783 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
784 784 return self._manifest.walk(m)
785 785
786 786 def matches(self, match):
787 787 return self.walk(match)
788 788
789 789
790 790 class basefilectx(object):
791 791 """A filecontext object represents the common logic for its children:
792 792 filectx: read-only access to a filerevision that is already present
793 793 in the repo,
794 794 workingfilectx: a filecontext that represents files from the working
795 795 directory,
796 796 memfilectx: a filecontext that represents files in-memory,
797 797 """
798 798
799 799 @propertycache
800 800 def _filelog(self):
801 801 return self._repo.file(self._path)
802 802
803 803 @propertycache
804 804 def _changeid(self):
805 805 if '_changectx' in self.__dict__:
806 806 return self._changectx.rev()
807 807 elif '_descendantrev' in self.__dict__:
808 808 # this file context was created from a revision with a known
809 809 # descendant, we can (lazily) correct for linkrev aliases
810 810 return self._adjustlinkrev(self._descendantrev)
811 811 else:
812 812 return self._filelog.linkrev(self._filerev)
813 813
814 814 @propertycache
815 815 def _filenode(self):
816 816 if '_fileid' in self.__dict__:
817 817 return self._filelog.lookup(self._fileid)
818 818 else:
819 819 return self._changectx.filenode(self._path)
820 820
821 821 @propertycache
822 822 def _filerev(self):
823 823 return self._filelog.rev(self._filenode)
824 824
825 825 @propertycache
826 826 def _repopath(self):
827 827 return self._path
828 828
829 829 def __nonzero__(self):
830 830 try:
831 831 self._filenode
832 832 return True
833 833 except error.LookupError:
834 834 # file is missing
835 835 return False
836 836
837 837 __bool__ = __nonzero__
838 838
839 839 def __bytes__(self):
840 840 try:
841 841 return b"%s@%s" % (self.path(), self._changectx)
842 842 except error.LookupError:
843 843 return b"%s@???" % self.path()
844 844
845 845 __str__ = encoding.strmethod(__bytes__)
846 846
847 847 def __repr__(self):
848 848 return "<%s %s>" % (type(self).__name__, str(self))
849 849
850 850 def __hash__(self):
851 851 try:
852 852 return hash((self._path, self._filenode))
853 853 except AttributeError:
854 854 return id(self)
855 855
856 856 def __eq__(self, other):
857 857 try:
858 858 return (
859 859 type(self) == type(other)
860 860 and self._path == other._path
861 861 and self._filenode == other._filenode
862 862 )
863 863 except AttributeError:
864 864 return False
865 865
866 866 def __ne__(self, other):
867 867 return not (self == other)
868 868
869 869 def filerev(self):
870 870 return self._filerev
871 871
872 872 def filenode(self):
873 873 return self._filenode
874 874
875 875 @propertycache
876 876 def _flags(self):
877 877 return self._changectx.flags(self._path)
878 878
879 879 def flags(self):
880 880 return self._flags
881 881
882 882 def filelog(self):
883 883 return self._filelog
884 884
885 885 def rev(self):
886 886 return self._changeid
887 887
888 888 def linkrev(self):
889 889 return self._filelog.linkrev(self._filerev)
890 890
891 891 def node(self):
892 892 return self._changectx.node()
893 893
894 894 def hex(self):
895 895 return self._changectx.hex()
896 896
897 897 def user(self):
898 898 return self._changectx.user()
899 899
900 900 def date(self):
901 901 return self._changectx.date()
902 902
903 903 def files(self):
904 904 return self._changectx.files()
905 905
906 906 def description(self):
907 907 return self._changectx.description()
908 908
909 909 def branch(self):
910 910 return self._changectx.branch()
911 911
912 912 def extra(self):
913 913 return self._changectx.extra()
914 914
915 915 def phase(self):
916 916 return self._changectx.phase()
917 917
918 918 def phasestr(self):
919 919 return self._changectx.phasestr()
920 920
921 921 def obsolete(self):
922 922 return self._changectx.obsolete()
923 923
924 924 def instabilities(self):
925 925 return self._changectx.instabilities()
926 926
927 927 def manifest(self):
928 928 return self._changectx.manifest()
929 929
930 930 def changectx(self):
931 931 return self._changectx
932 932
933 933 def renamed(self):
934 934 return self._copied
935 935
936 936 def copysource(self):
937 937 return self._copied and self._copied[0]
938 938
939 939 def repo(self):
940 940 return self._repo
941 941
942 942 def size(self):
943 943 return len(self.data())
944 944
945 945 def path(self):
946 946 return self._path
947 947
948 948 def isbinary(self):
949 949 try:
950 950 return stringutil.binary(self.data())
951 951 except IOError:
952 952 return False
953 953
954 954 def isexec(self):
955 955 return b'x' in self.flags()
956 956
957 957 def islink(self):
958 958 return b'l' in self.flags()
959 959
960 960 def isabsent(self):
961 961 """whether this filectx represents a file not in self._changectx
962 962
963 963 This is mainly for merge code to detect change/delete conflicts. This is
964 964 expected to be True for all subclasses of basectx."""
965 965 return False
966 966
967 967 _customcmp = False
968 968
969 969 def cmp(self, fctx):
970 970 """compare with other file context
971 971
972 972 returns True if different than fctx.
973 973 """
974 974 if fctx._customcmp:
975 975 return fctx.cmp(self)
976 976
977 977 if self._filenode is None:
978 978 raise error.ProgrammingError(
979 979 b'filectx.cmp() must be reimplemented if not backed by revlog'
980 980 )
981 981
982 982 if fctx._filenode is None:
983 983 if self._repo._encodefilterpats:
984 984 # can't rely on size() because wdir content may be decoded
985 985 return self._filelog.cmp(self._filenode, fctx.data())
986 986 if self.size() - 4 == fctx.size():
987 987 # size() can match:
988 988 # if file data starts with '\1\n', empty metadata block is
989 989 # prepended, which adds 4 bytes to filelog.size().
990 990 return self._filelog.cmp(self._filenode, fctx.data())
991 991 if self.size() == fctx.size() or self.flags() == b'l':
992 992 # size() matches: need to compare content
993 993 # issue6456: Always compare symlinks because size can represent
994 994 # encrypted string for EXT-4 encryption(fscrypt).
995 995 return self._filelog.cmp(self._filenode, fctx.data())
996 996
997 997 # size() differs
998 998 return True
999 999
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        # linkrevs may point to filtered revisions; walk unfiltered
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the source revision is the stored linkrev itself
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # walked past the requested floor without finding it
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
1052 1052
1053 1053 def isintroducedafter(self, changelogrev):
1054 1054 """True if a filectx has been introduced after a given floor revision"""
1055 1055 if self.linkrev() >= changelogrev:
1056 1056 return True
1057 1057 introrev = self._introrev(stoprev=changelogrev)
1058 1058 if introrev is None:
1059 1059 return False
1060 1060 return introrev >= changelogrev
1061 1061
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()
1072 1072
1073 1073 def _introrev(self, stoprev=None):
1074 1074 """
1075 1075 Same as `introrev` but, with an extra argument to limit changelog
1076 1076 iteration range in some internal usecase.
1077 1077
1078 1078 If `stoprev` is set, the `introrev` will not be searched past that
1079 1079 `stoprev` revision and "None" might be returned. This is useful to
1080 1080 limit the iteration range.
1081 1081 """
1082 1082 toprev = None
1083 1083 attrs = vars(self)
1084 1084 if '_changeid' in attrs:
1085 1085 # We have a cached value already
1086 1086 toprev = self._changeid
1087 1087 elif '_changectx' in attrs:
1088 1088 # We know which changelog entry we are coming from
1089 1089 toprev = self._changectx.rev()
1090 1090
1091 1091 if toprev is not None:
1092 1092 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1093 1093 elif '_descendantrev' in attrs:
1094 1094 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1095 1095 # be nice and cache the result of the computation
1096 1096 if introrev is not None:
1097 1097 self._changeid = introrev
1098 1098 return introrev
1099 1099 else:
1100 1100 return self.linkrev()
1101 1101
1102 1102 def introfilectx(self):
1103 1103 """Return filectx having identical contents, but pointing to the
1104 1104 changeset revision where this filectx was introduced"""
1105 1105 introrev = self.introrev()
1106 1106 if self.rev() == introrev:
1107 1107 return self
1108 1108 return self.filectx(self.filenode(), changeid=introrev)
1109 1109
1110 1110 def _parentfilectx(self, path, fileid, filelog):
1111 1111 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1112 1112 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1113 1113 if '_changeid' in vars(self) or '_changectx' in vars(self):
1114 1114 # If self is associated with a changeset (probably explicitly
1115 1115 # fed), ensure the created filectx is associated with a
1116 1116 # changeset that is an ancestor of self.changectx.
1117 1117 # This lets us later use _adjustlinkrev to get a correct link.
1118 1118 fctx._descendantrev = self.rev()
1119 1119 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1120 1120 elif '_descendantrev' in vars(self):
1121 1121 # Otherwise propagate _descendantrev if we have one associated.
1122 1122 fctx._descendantrev = self._descendantrev
1123 1123 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1124 1124 return fctx
1125 1125
    def parents(self):
        """return the parent filectxes of this file revision

        Rename information recorded in the filelog replaces the first null
        parent, so a copy source shows up as a regular parent.
        """
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [
            (_path, node, fl)
            for node in parents
            if node != self._repo.nodeconstants.nullid
        ]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            #   be replaced with the rename information. This parent is -always-
            #   the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1149 1149
    def p1(self):
        """return the first parent filectx"""
        return self.parents()[0]
1152 1152
1153 1153 def p2(self):
1154 1154 p = self.parents()
1155 1155 if len(p) == 2:
1156 1156 return p[1]
1157 1157 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1158 1158
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid  # accessing the property caches the changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )
1208 1208
1209 1209 def ancestors(self, followfirst=False):
1210 1210 visit = {}
1211 1211 c = self
1212 1212 if followfirst:
1213 1213 cut = 1
1214 1214 else:
1215 1215 cut = None
1216 1216
1217 1217 while True:
1218 1218 for parent in c.parents()[:cut]:
1219 1219 visit[(parent.linkrev(), parent.filenode())] = parent
1220 1220 if not visit:
1221 1221 break
1222 1222 c = visit.pop(max(visit))
1223 1223 yield c
1224 1224
1225 1225 def decodeddata(self):
1226 1226 """Returns `data()` after running repository decoding filters.
1227 1227
1228 1228 This is often equivalent to how the data would be expressed on disk.
1229 1229 """
1230 1230 return self._repo.wwritedata(self.path(), self.data())
1231 1231
1232 1232
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        # only pre-seed the attributes the caller provided; everything else
        # is computed lazily via propertycache
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """raw filelog data for this file revision"""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """file content, honoring the censor policy for censored nodes"""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # censored content is replaced by empty data when policy allows
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """size of this file revision as reported by the filelog"""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        # linkrev points elsewhere: only report the copy when neither
        # changeset parent already carries this exact file revision
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1357 1357
1358 1358
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        """Initialize a committable context.

        text - the commit message
        user - the committer, or None to ask the ui later
        date - any valid date string or (unixtime, offset), or None
        extra - a dict of extra commit metadata (copied defensively), or None
        changes - a precomputed status, or None to ask the repository
        branch - branch name (local encoding), or None; an unset/empty
                 branch falls back to b'default'
        """
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            # copy so later mutation of self._extra cannot leak to the caller
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    def hex(self):
        # fix: the 'return' was missing here, so hex() always yielded None
        # instead of the working-directory hash (compare workingctx.hex()).
        return self._repo.nodeconstants.wdirhex

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # lazily computed when no explicit `changes` was given to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin "now" to a deterministic value
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """sorted list of all files touched (modified, added or removed)"""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # aliases used by the commit machinery
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # a new commit is never more public than any of its parents
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """flags of `path`, from the cached manifest when available"""
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1526 1526
1527 1527
1528 1528 class workingctx(committablectx):
1529 1529 """A workingctx object makes access to data related to
1530 1530 the current working directory convenient.
1531 1531 date - any valid date string or (unixtime, offset), or None.
1532 1532 user - username string, or None.
1533 1533 extra - a dictionary of extra values, or None.
1534 1534 changes - a list of file lists as returned by localrepo.status()
1535 1535 or None to use the repository status.
1536 1536 """
1537 1537
1538 1538 def __init__(
1539 1539 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1540 1540 ):
1541 1541 branch = None
1542 1542 if not extra or b'branch' not in extra:
1543 1543 try:
1544 1544 branch = repo.dirstate.branch()
1545 1545 except UnicodeDecodeError:
1546 1546 raise error.Abort(_(b'branch name not in UTF-8!'))
1547 1547 super(workingctx, self).__init__(
1548 1548 repo, text, user, date, extra, changes, branch=branch
1549 1549 )
1550 1550
1551 1551 def __iter__(self):
1552 1552 d = self._repo.dirstate
1553 1553 for f in d:
1554 1554 if d[f] != b'r':
1555 1555 yield f
1556 1556
1557 1557 def __contains__(self, key):
1558 1558 return self._repo.dirstate[key] not in b"?r"
1559 1559
    def hex(self):
        # the working directory is identified by the magic wdir hash
        return self._repo.nodeconstants.wdirhex
1562 1562
1563 1563 @propertycache
1564 1564 def _parents(self):
1565 1565 p = self._repo.dirstate.parents()
1566 1566 if p[1] == self._repo.nodeconstants.nullid:
1567 1567 p = p[:-1]
1568 1568 # use unfiltered repo to delay/avoid loading obsmarkers
1569 1569 unfi = self._repo.unfiltered()
1570 1570 return [
1571 1571 changectx(
1572 1572 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1573 1573 )
1574 1574 for n in p
1575 1575 ]
1576 1576
    def setparents(self, p1node, p2node=None):
        """set the working directory parents, adjusting copy records

        Copy records are kept only for entries added relative to the new
        first parent; the dirstate cannot do this itself because it has no
        access to the parents' manifests.
        """
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == self._repo.nodeconstants.nullid:
                # drop copy records that reference neither side
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)
1595 1595
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        # (the attribute access triggers the propertycache)
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1600 1600
1601 1601 def _buildflagfunc(self):
1602 1602 # Create a fallback function for getting file flags when the
1603 1603 # filesystem doesn't support them
1604 1604
1605 1605 copiesget = self._repo.dirstate.copies().get
1606 1606 parents = self.parents()
1607 1607 if len(parents) < 2:
1608 1608 # when we have one parent, it's easy: copy from parent
1609 1609 man = parents[0].manifest()
1610 1610
1611 1611 def func(f):
1612 1612 f = copiesget(f, f)
1613 1613 return man.flags(f)
1614 1614
1615 1615 else:
1616 1616 # merges are tricky: we try to reconstruct the unstored
1617 1617 # result from the merge (issue1802)
1618 1618 p1, p2 = parents
1619 1619 pa = p1.ancestor(p2)
1620 1620 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1621 1621
1622 1622 def func(f):
1623 1623 f = copiesget(f, f) # may be wrong for merges with copies
1624 1624 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1625 1625 if fl1 == fl2:
1626 1626 return fl1
1627 1627 if fl1 == fla:
1628 1628 return fl2
1629 1629 if fl2 == fla:
1630 1630 return fl1
1631 1631 return b'' # punt for conflicts
1632 1632
1633 1633 return func
1634 1634
    @propertycache
    def _flagfunc(self):
        # the dirstate's flagfunc falls back to _buildflagfunc when the
        # filesystem cannot report flags itself
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1638 1638
    def flags(self, path):
        """return the flags of a working directory file (b'' on OS errors)"""
        try:
            return self._flagfunc(path)
        except OSError:
            # file vanished or is unreadable: report no flags
            return b''
1644 1644
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        # workingfilectx is defined later in this module
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
1650 1650
1651 1651 def dirty(self, missing=False, merge=True, branch=True):
1652 1652 """check whether a working directory is modified"""
1653 1653 # check subrepos first
1654 1654 for s in sorted(self.substate):
1655 1655 if self.sub(s).dirty(missing=missing):
1656 1656 return True
1657 1657 # check current working dir
1658 1658 return (
1659 1659 (merge and self.p2())
1660 1660 or (branch and self.branch() != self.p1().branch())
1661 1661 or self.modified()
1662 1662 or self.added()
1663 1663 or self.removed()
1664 1664 or (missing and self.deleted())
1665 1665 )
1666 1666
1667 1667 def add(self, list, prefix=b""):
1668 1668 with self._repo.wlock():
1669 1669 ui, ds = self._repo.ui, self._repo.dirstate
1670 1670 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1671 1671 rejected = []
1672 1672 lstat = self._repo.wvfs.lstat
1673 1673 for f in list:
1674 1674 # ds.pathto() returns an absolute file when this is invoked from
1675 1675 # the keyword extension. That gets flagged as non-portable on
1676 1676 # Windows, since it contains the drive letter and colon.
1677 1677 scmutil.checkportable(ui, os.path.join(prefix, f))
1678 1678 try:
1679 1679 st = lstat(f)
1680 1680 except OSError:
1681 1681 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1682 1682 rejected.append(f)
1683 1683 continue
1684 1684 limit = ui.configbytes(b'ui', b'large-file-limit')
1685 1685 if limit != 0 and st.st_size > limit:
1686 1686 ui.warn(
1687 1687 _(
1688 1688 b"%s: up to %d MB of RAM may be required "
1689 1689 b"to manage this file\n"
1690 1690 b"(use 'hg revert %s' to cancel the "
1691 1691 b"pending addition)\n"
1692 1692 )
1693 1693 % (f, 3 * st.st_size // 1000000, uipath(f))
1694 1694 )
1695 1695 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1696 1696 ui.warn(
1697 1697 _(
1698 1698 b"%s not added: only files and symlinks "
1699 1699 b"supported currently\n"
1700 1700 )
1701 1701 % uipath(f)
1702 1702 )
1703 1703 rejected.append(f)
1704 1704 elif not ds.set_tracked(f):
1705 1705 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1706 1706 return rejected
1707 1707
1708 1708 def forget(self, files, prefix=b""):
1709 1709 with self._repo.wlock():
1710 1710 ds = self._repo.dirstate
1711 1711 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1712 1712 rejected = []
1713 1713 for f in files:
1714 1714 if not ds.set_untracked(f):
1715 1715 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1716 1716 rejected.append(f)
1717 1717 return rejected
1718 1718
    def copy(self, source, dest):
        """record in the dirstate that `dest` was copied from `source`"""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # a missing destination is only warned about; any other
            # error is re-raised
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                ds.set_tracked(dest)
                ds.copy(source, dest)
1739 1739
    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """build a matcher for working directory files"""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )
1770 1770
1771 1771 def _filtersuspectsymlink(self, files):
1772 1772 if not files or self._repo.dirstate._checklink:
1773 1773 return files
1774 1774
1775 1775 # Symlink placeholders may get non-symlink-like contents
1776 1776 # via user error or dereferencing by NFS or Samba servers,
1777 1777 # so we filter out any placeholders that don't look like a
1778 1778 # symlink
1779 1779 sane = []
1780 1780 for f in files:
1781 1781 if self.flags(f) == b'l':
1782 1782 d = self[f].data()
1783 1783 if (
1784 1784 d == b''
1785 1785 or len(d) >= 1024
1786 1786 or b'\n' in d
1787 1787 or stringutil.binary(d)
1788 1788 ):
1789 1789 self._repo.ui.debug(
1790 1790 b'ignoring suspect symlink placeholder "%s"\n' % f
1791 1791 )
1792 1792 continue
1793 1793 sane.append(f)
1794 1794 return sane
1795 1795
    def _checklookup(self, files):
        """recheck files the dirstate could not classify as clean or dirty

        Returns (modified, deleted, fixup) where `fixup` lists the files
        that turned out to be clean.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1828 1828
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus or self._repo.dirstate._dirty:
            try:
                # remember the dirstate identity so we can detect a
                # concurrent rewrite after we (re)acquire the wlock
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    dirstate = self._repo.dirstate
                    if dirstate.identity() == oldid:
                        if fixup:
                            if dirstate.pendingparentchange():
                                normal = lambda f: dirstate.update_file(
                                    f, p1_tracked=True, wc_tracked=True
                                )
                            else:
                                normal = dirstate.set_clean
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # best effort only: fine to skip the fixup if locked
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1875 1875
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # `cmp` holds files the dirstate could not decide about by itself
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1908 1908
1909 1909 @propertycache
1910 1910 def _copies(self):
1911 1911 p1copies = {}
1912 1912 p2copies = {}
1913 1913 parents = self._repo.dirstate.parents()
1914 1914 p1manifest = self._repo[parents[0]].manifest()
1915 1915 p2manifest = self._repo[parents[1]].manifest()
1916 1916 changedset = set(self.added()) | set(self.modified())
1917 1917 narrowmatch = self._repo.narrowmatch()
1918 1918 for dst, src in self._repo.dirstate.copies().items():
1919 1919 if dst not in changedset or not narrowmatch(dst):
1920 1920 continue
1921 1921 if src in p1manifest:
1922 1922 p1copies[dst] = src
1923 1923 elif src in p2manifest:
1924 1924 p2copies[dst] = src
1925 1925 return p1copies, p2copies
1926 1926
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1937 1937
    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        # start from p1's manifest and overlay the working-directory changes
        man = parents[0].manifest().copy()

        ff = self._flagfunc
        # added/modified files get sentinel node ids instead of real hashes
        for i, l in (
            (self._repo.nodeconstants.addednodeid, status.added),
            (self._repo.nodeconstants.modifiednodeid, status.modified),
        ):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    # the file may have vanished from disk; keep default flags
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man
1961 1961
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # note: the incoming ``s`` is ignored; status is recomputed from the
        # dirstate so the fast path below always starts from fresh data
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # slow path: delegate to the generic manifest comparison
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
1982 1982
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            # install the warning callback on the caller-provided matcher
            match.bad = bad
        return match
2005 2005
2006 2006 def walk(self, match):
2007 2007 '''Generates matching file names.'''
2008 2008 return sorted(
2009 2009 self._repo.dirstate.walk(
2010 2010 self._repo.narrowmatch(match),
2011 2011 subrepos=sorted(self.substate),
2012 2012 unknown=True,
2013 2013 ignored=False,
2014 2014 )
2015 2015 )
2016 2016
2017 2017 def matches(self, match):
2018 2018 match = self._repo.narrowmatch(match)
2019 2019 ds = self._repo.dirstate
2020 2020 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2021 2021
2022 2022 def markcommitted(self, node):
2023 2023 with self._repo.dirstate.parentchange():
2024 2024 for f in self.modified() + self.added():
2025 self._repo.dirstate.normal(f)
2025 self._repo.dirstate.update_file(
2026 f, p1_tracked=True, wc_tracked=True
2027 )
2026 2028 for f in self.removed():
2027 2029 self._repo.dirstate.drop(f)
2028 2030 self._repo.dirstate.setparents(node)
2029 2031 self._repo._quick_access_changeid_invalidate()
2030 2032
2031 2033 sparse.aftercommit(self._repo, node)
2032 2034
2033 2035 # write changes out explicitly, because nesting wlock at
2034 2036 # runtime may prevent 'wlock.release()' in 'repo.commit()'
2035 2037 # from immediately doing so for subsequent changing files
2036 2038 self._repo.dirstate.write(self._repo.currenttransaction())
2037 2039
2038 2040 def mergestate(self, clean=False):
2039 2041 if clean:
2040 2042 return mergestatemod.mergestate.clean(self._repo)
2041 2043 return mergestatemod.mergestate.read(self._repo)
2042 2044
2043 2045
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # not committed yet: no changeset or filelog revision assigned
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context is always truthy
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, filenode of source in p1) if this file was
        copied/renamed, else None."""
        path = self.copysource()
        if not path:
            return None
        return (
            path,
            self._changectx._parents[0]._manifest.get(
                path, self._repo.nodeconstants.nullid
            ),
        )

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            # null id when the path is absent from the context's manifest
            return ctx._manifest.get(path, self._repo.nodeconstants.nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # follow the copy: the first parent is the rename source
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # skip parents in which the file does not exist (null filenode)
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != self._repo.nodeconstants.nullid
        ]

    def children(self):
        # an uncommitted file has no children
        return []
2106 2108
2107 2109
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working-directory context
        return workingctx(self._repo)

    def data(self):
        # read the file content from the working directory
        return self._repo.wread(self._path)

    def copysource(self):
        # copy source recorded in the dirstate, if any
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        t, tz = self._changectx.date()
        try:
            # prefer the on-disk mtime, keeping the changectx timezone
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone: fall back to the changectx date
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # check the path for traversal/symlink issues inside the repo
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            # also unlink the first conflicting file found on the way up
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # l: symlink flag, x: executable flag
        self._repo.wvfs.setflags(self._path, l, x)
2200 2202
2201 2203
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # start with an empty write-back cache and no merge state
        self.clean()

    def setbase(self, wrappedctx):
        # set the context this overlay applies its cached writes on top of
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=None):
        # the first parent must stay the wrapped context; only p2 may vary
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        # serve cached writes first, falling back to the wrapped context
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # p1's manifest overlaid with the cached adds/changes/removals,
        # using the sentinel added/modified node ids as placeholders
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = self._repo.nodeconstants.addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = self._repo.nodeconstants.modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # flags are only tracked for cached (dirty) paths
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        # every path touched by this overlay, sorted
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # cached entries that exist and were also present in the parent
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # cached entries that exist but were absent from the parent
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # cached entries marked deleted that were present in the parent
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        # NOTE(review): body is identical to p1copies; all cached copy
        # records are reported for both parents
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        # this context never touches the filesystem
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        # record ``path`` as copied from ``origin``, preserving the
        # currently-known date and flags
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        # cached entries take precedence over the underlying p1
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        # an in-memory "write": record the new content in the cache
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        # symlink takes precedence over exec when both are requested
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        # an in-memory deletion: mark the path as gone in the cache
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        # like tomemctx, but reusing the precursor's metadata and tagging
        # the result with its source for amend bookkeeping
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        # a path is "dirty" iff it has a cache entry (written or removed)
        return path in self._cache

    def clean(self):
        # reset all in-memory state
        self._mergestate = None
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [
                (
                    self.p1().rev(),
                    scmutil.matchfiles(self.repo(), self._cache.keys()),
                )
            ],
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }
        # the manifest must be recomputed to reflect the cache change
        util.clearcachedproperty(self, b'_manifest')

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )

    def mergestate(self, clean=False):
        # in-memory merge state, created lazily (or reset when clean=True)
        if clean or self._mergestate is None:
            self._mergestate = mergestatemod.memmergestate(self._repo)
        return self._mergestate
2619 2621
2620 2622
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        # ``parent`` is the owning overlayworkingctx; all operations below
        # delegate to it with this file's path
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True if content differs from the other file context
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # nothing to audit: no filesystem is involved
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is accepted for interface compatibility but
        # not forwarded: there is no file handle to close
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no on-disk conflicts are possible for an in-memory file
        pass
2675 2677
2676 2678
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        # ``changes`` is the status object describing what is being committed
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # everything in the manifest that this commit does not touch
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2719 2721
2720 2722
def makecachingfilectxfn(func):
    """Wrap ``func`` in a filectxfn that memoizes its result per path.

    ``util.cachefunc`` is not usable here: it keys on every argument, and
    caching on the repo/memctx arguments would create a reference cycle.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        try:
            return memo[path]
        except KeyError:
            result = memo[path] = func(repo, memctx, path)
            return result

    return getfilectx
2736 2738
2737 2739
def memfilefromctx(ctx):
    """Build a filectxfn serving files out of an existing context ``ctx``.

    This is a convenience helper for constructing a memctx whose file
    contents come from another context.
    """

    def getfilectx(repo, memctx, path):
        srcfctx = ctx[path]
        source = srcfctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            srcfctx.data(),
            islink=srcfctx.islink(),
            isexec=srcfctx.isexec(),
            copysource=source,
        )

    return getfilectx
2759 2761
2760 2762
def memfilefrompatch(patchstore):
    """Build a filectxfn backed by a patch store.

    This is a convenience helper for constructing a memctx from a
    patchstore-like object; paths the patch deletes yield None.
    """

    def getfilectx(repo, memctx, path):
        filedata, filemode, source = patchstore.getfile(path)
        if filedata is None:
            # the patch removes this file
            return None
        linkflag, execflag = filemode
        return memfilectx(
            repo,
            memctx,
            path,
            filedata,
            islink=linkflag,
            isexec=execflag,
            copysource=source,
        )

    return getfilectx
2783 2785
2784 2786
2785 2787 class memctx(committablectx):
2786 2788 """Use memctx to perform in-memory commits via localrepo.commitctx().
2787 2789
2788 2790 Revision information is supplied at initialization time while
2789 2791 related files data and is made available through a callback
2790 2792 mechanism. 'repo' is the current localrepo, 'parents' is a
2791 2793 sequence of two parent revisions identifiers (pass None for every
2792 2794 missing parent), 'text' is the commit message and 'files' lists
2793 2795 names of files touched by the revision (normalized and relative to
2794 2796 repository root).
2795 2797
2796 2798 filectxfn(repo, memctx, path) is a callable receiving the
2797 2799 repository, the current memctx object and the normalized path of
2798 2800 requested file, relative to repository root. It is fired by the
2799 2801 commit function for every file in 'files', but calls order is
2800 2802 undefined. If the file is available in the revision being
2801 2803 committed (updated or added), filectxfn returns a memfilectx
2802 2804 object. If the file was removed, filectxfn return None for recent
2803 2805 Mercurial. Moved files are represented by marking the source file
2804 2806 removed and the new file added with copy information (see
2805 2807 memfilectx).
2806 2808
2807 2809 user receives the committer name and defaults to current
2808 2810 repository username, date is the commit date in any format
2809 2811 supported by dateutil.parsedate() and defaults to current date, extra
2810 2812 is a dictionary of metadata or is left empty.
2811 2813 """
2812 2814
2813 2815 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2814 2816 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2815 2817 # this field to determine what to do in filectxfn.
2816 2818 _returnnoneformissingfiles = True
2817 2819
    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        """See the class docstring for the meaning of the arguments."""
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        # not committed yet: no revision number or node assigned
        self._rev = None
        self._node = None
        # missing parents (None) are normalized to the null id
        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            # the editor may rewrite the message; keep a backup copy of it
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)
2855 2857
2856 2858 def filectx(self, path, filelog=None):
2857 2859 """get a file context from the working directory
2858 2860
2859 2861 Returns None if file doesn't exist and should be removed."""
2860 2862 return self._filectxfn(self._repo, self, path)
2861 2863
2862 2864 def commit(self):
2863 2865 """commit context to the repo"""
2864 2866 return self._repo.commitctx(self)
2865 2867
2866 2868 @propertycache
2867 2869 def _manifest(self):
2868 2870 """generate a manifest based on the return values of filectxfn"""
2869 2871
2870 2872 # keep this simple for now; just worry about p1
2871 2873 pctx = self._parents[0]
2872 2874 man = pctx.manifest().copy()
2873 2875
2874 2876 for f in self._status.modified:
2875 2877 man[f] = self._repo.nodeconstants.modifiednodeid
2876 2878
2877 2879 for f in self._status.added:
2878 2880 man[f] = self._repo.nodeconstants.addednodeid
2879 2881
2880 2882 for f in self._status.removed:
2881 2883 if f in man:
2882 2884 del man[f]
2883 2885
2884 2886 return man
2885 2887
2886 2888 @propertycache
2887 2889 def _status(self):
2888 2890 """Calculate exact status from ``files`` specified at construction"""
2889 2891 man1 = self.p1().manifest()
2890 2892 p2 = self._parents[1]
2891 2893 # "1 < len(self._parents)" can't be used for checking
2892 2894 # existence of the 2nd parent, because "memctx._parents" is
2893 2895 # explicitly initialized by the list, of which length is 2.
2894 2896 if p2.rev() != nullrev:
2895 2897 man2 = p2.manifest()
2896 2898 managing = lambda f: f in man1 or f in man2
2897 2899 else:
2898 2900 managing = lambda f: f in man1
2899 2901
2900 2902 modified, added, removed = [], [], []
2901 2903 for f in self._files:
2902 2904 if not managing(f):
2903 2905 added.append(f)
2904 2906 elif self[f]:
2905 2907 modified.append(f)
2906 2908 else:
2907 2909 removed.append(f)
2908 2910
2909 2911 return scmutil.status(modified, added, removed, [], [], [], [])
2910 2912
2911 2913 def parents(self):
2912 2914 if self._parents[1].rev() == nullrev:
2913 2915 return [self._parents[0]]
2914 2916 return self._parents
2915 2917
2916 2918
class memfilectx(committablefilectx):
    """An in-memory file that is part of a pending commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Symlink takes precedence over the executable bit.
        if islink:
            flags = b'l'
        elif isexec:
            flags = b'x'
        else:
            flags = b''
        self._flags = flags
        self._copysource = copysource

    def copysource(self):
        """Return the copy source path, or None if not a copy."""
        return self._copysource

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2968 2970
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        # Default to reusing the original commit message.
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # Not committed yet: no revision number or node id assigned.
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        # The whole point of this class: reuse the original manifest node.
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # Copy, then pad with null parents so there are always exactly two.
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): ``p1`` is a changectx while ``nullid`` is a node id,
        # so this inequality looks always-true; presumably ``p1.node()`` was
        # intended — confirm before relying on a null-parent short-circuit.
        if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the (reused) manifest node id."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        # Look up the manifest context lazily from the manifest log.
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        """Delegate file contexts to the original revision."""
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        # Reuse the original revision's manifest unchanged.
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3077 3079
3078 3080
class arbitraryfilectx(object):
    """Expose filectx-like operations for a file at an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so only take
        # the fast path when neither side is a symlink.
        no_symlink = b'l' not in self.flags() and b'l' not in fctx.flags()
        if no_symlink and isinstance(fctx, workingfilectx) and self._repo:
            # Fast-path for merge when both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if
            # same) from our cmp functions (True if different).
            same = filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
            return not same
        return self.data() != fctx.data()

    def path(self):
        """Return the path given at construction time."""
        return self._path

    def flags(self):
        """Arbitrary files carry no flags."""
        return b''

    def data(self):
        """Return the raw file content read from disk."""
        return util.readfile(self._path)

    def decodeddata(self):
        """Return the file content as stored on disk."""
        with open(self._path, b"rb") as fp:
            return fp.read()

    def remove(self):
        """Delete the file from disk."""
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        """Write ``data`` to disk; only flag-less (plain) files supported."""
        assert not flags
        with open(self._path, b"wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now