##// END OF EJS Templates
changectx: add a "maybe filtered" filtered attribute...
marmoute -
r44199:1e87851d default
parent child Browse files
Show More
@@ -1,3000 +1,3011 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from .pycompat import (
27 27 getattr,
28 28 open,
29 29 )
30 30 from . import (
31 31 copies,
32 32 dagop,
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 obsolete as obsmod,
38 38 patch,
39 39 pathutil,
40 40 phases,
41 41 pycompat,
42 42 repoview,
43 43 scmutil,
44 44 sparse,
45 45 subrepo,
46 46 subrepoutil,
47 47 util,
48 48 )
49 49 from .utils import (
50 50 dateutil,
51 51 stringutil,
52 52 )
53 53
# Local alias: caches the computed value on the instance after first access.
propertycache = util.propertycache
55 55
56 56
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # short hex form of the changeset node
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Equal when the concrete type and the revision number both match.
        # AttributeError covers contexts without a ``_rev`` (e.g. in-memory).
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # ``path in ctx`` tests membership in this revision's manifest
        return key in self._manifest

    def __getitem__(self, key):
        # ``ctx[path]`` returns the file context for that path
        return self.filectx(key)

    def __iter__(self):
        # iterate over the tracked file names (manifest keys)
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        ``other`` is the context to compare against, ``s`` a pre-computed
        scmutil.status carrying the deleted/unknown/ignored file lists.
        Returns a new scmutil.status.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                # present and identical in both manifests
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # parsed subrepository state for this context (cached)
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # revision recorded for the given subrepo path
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        # human-readable phase name (e.g. from phases.phasenames)
        return phases.phasenames[self.phase()]

    def mutable(self):
        # anything above the public phase is still mutable
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        # build a matcher from a fileset expression evaluated in this context
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # second parent, or the null context when there is only one parent
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        # Resolve (filenode, flags) for ``path``, preferring whatever manifest
        # data is already cached on this instance before hitting the
        # manifestlog. Raises ManifestLookupError when the path is absent.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # b'' means "no special flags" for files missing from the manifest
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        # (p1copies, p2copies) computed from the changeset (cached)
        return copies.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
    ):
        # Build a file matcher rooted at the repo, bound to this context.
        r = self._repo
        return matchmod.match(
            r.root,
            r.getcwd(),
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        # Default to diffing against the first parent.
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                # Prefix subrepo results with the subrepo path and fold them
                # into the top-level status lists.
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r
474 474
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        # hash by revision number; fall back to identity when _rev is unset
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # When the revision is known to be unfiltered, going through the
        # unfiltered changelog avoids the filtering overhead.
        if self._maybe_filtered:
            repo = self._repo
        else:
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # list of one or two parent contexts; nullrev second parent is elided
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        # files touched by this changeset that were neither added nor removed
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        # Honor the configured source for copy/files metadata: changeset
        # sidedata, changeset-only, compatibility, or filelog-based.
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = copies.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded

    def filesremoved(self):
        # Mirror image of filesadded() for removed files.
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = copies.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        # True when this revision is filtered out of the 'visible' view
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        # lazily yield a context for every ancestor revision
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched; use the revlog ancestor
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
747 758
748 759 class basefilectx(object):
749 760 """A filecontext object represents the common logic for its children:
750 761 filectx: read-only access to a filerevision that is already present
751 762 in the repo,
752 763 workingfilectx: a filecontext that represents files from the working
753 764 directory,
754 765 memfilectx: a filecontext that represents files in-memory,
755 766 """
756 767
    @propertycache
    def _filelog(self):
        # revlog storing this file's history (cached)
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Changelog revision this file context belongs to. Prefer an explicit
        # changectx, then correct via a known descendant, and only fall back
        # to the raw (possibly aliased) linkrev.
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        # node of this file revision, resolved from the explicit file id
        # when one was given, otherwise through the owning changectx
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # filelog-local revision number of this file revision
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path
786 797
787 798 def __nonzero__(self):
788 799 try:
789 800 self._filenode
790 801 return True
791 802 except error.LookupError:
792 803 # file is missing
793 804 return False
794 805
795 806 __bool__ = __nonzero__
796 807
797 808 def __bytes__(self):
798 809 try:
799 810 return b"%s@%s" % (self.path(), self._changectx)
800 811 except error.LookupError:
801 812 return b"%s@???" % self.path()
802 813
803 814 __str__ = encoding.strmethod(__bytes__)
804 815
805 816 def __repr__(self):
806 817 return "<%s %s>" % (type(self).__name__, str(self))
807 818
808 819 def __hash__(self):
809 820 try:
810 821 return hash((self._path, self._filenode))
811 822 except AttributeError:
812 823 return id(self)
813 824
814 825 def __eq__(self, other):
815 826 try:
816 827 return (
817 828 type(self) == type(other)
818 829 and self._path == other._path
819 830 and self._filenode == other._filenode
820 831 )
821 832 except AttributeError:
822 833 return False
823 834
824 835 def __ne__(self, other):
825 836 return not (self == other)
826 837
    # Plain accessors: mostly thin delegation to the filelog or the owning
    # changectx.

    def filerev(self):
        return self._filerev

    def filenode(self):
        return self._filenode

    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)

    def flags(self):
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        # changelog revision, possibly corrected for linkrev aliases
        # (see _changeid); contrast with linkrev() below
        return self._changeid

    def linkrev(self):
        # raw linkrev as recorded in the filelog (no alias correction)
        return self._filelog.linkrev(self._filerev)

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        return self._copied

    def copysource(self):
        # copy source path, or a falsy value when this file was not copied
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        return len(self.data())

    def path(self):
        return self._path
905 916
    def isbinary(self):
        # best-effort: unreadable content is treated as non-binary
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False

    def isexec(self):
        # b'x' flag marks executable files
        return b'x' in self.flags()

    def islink(self):
        # b'l' flag marks symlinks
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
924 935
    # Subclasses with non-revlog comparison semantics set this to True so
    # cmp() defers to them (see the fctx._customcmp check below).
    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            # The other side has no stored filenode (e.g. working directory
            # content), so we may have to compare actual data.
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
955 966
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        # linkrevs point at the unfiltered history, so walk it unfiltered
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the source revision is the linkrev itself
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # hit the floor without finding the introduction
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
1008 1019
def isintroducedafter(self, changelogrev):
    """True if a filectx has been introduced after a given floor revision
    """
    # Fast path: the stored linkrev already clears the floor.
    if self.linkrev() >= changelogrev:
        return True
    # Otherwise locate the actual introduction, bounded by the floor so
    # the walk can stop early.
    introrev = self._introrev(stoprev=changelogrev)
    return introrev is not None and introrev >= changelogrev
1018 1029
def introrev(self):
    """return the rev of the changeset which introduced this file revision

    This method is different from linkrev because it take into account the
    changeset the filectx was created from. It ensures the returned
    revision is one of its ancestors. This prevents bugs from
    'linkrev-shadowing' when a file revision is used by multiple
    changesets.
    """
    # Unbounded search: delegate to the internal helper with no stoprev.
    return self._introrev(stoprev=None)
1029 1040
1030 1041 def _introrev(self, stoprev=None):
1031 1042 """
1032 1043 Same as `introrev` but, with an extra argument to limit changelog
1033 1044 iteration range in some internal usecase.
1034 1045
1035 1046 If `stoprev` is set, the `introrev` will not be searched past that
1036 1047 `stoprev` revision and "None" might be returned. This is useful to
1037 1048 limit the iteration range.
1038 1049 """
1039 1050 toprev = None
1040 1051 attrs = vars(self)
1041 1052 if '_changeid' in attrs:
1042 1053 # We have a cached value already
1043 1054 toprev = self._changeid
1044 1055 elif '_changectx' in attrs:
1045 1056 # We know which changelog entry we are coming from
1046 1057 toprev = self._changectx.rev()
1047 1058
1048 1059 if toprev is not None:
1049 1060 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1050 1061 elif '_descendantrev' in attrs:
1051 1062 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1052 1063 # be nice and cache the result of the computation
1053 1064 if introrev is not None:
1054 1065 self._changeid = introrev
1055 1066 return introrev
1056 1067 else:
1057 1068 return self.linkrev()
1058 1069
def introfilectx(self):
    """Return filectx having identical contents, but pointing to the
    changeset revision where this filectx was introduced"""
    introrev = self.introrev()
    if introrev == self.rev():
        # already bound to its introducing changeset
        return self
    # rebind the same file node onto the introducing changeset
    return self.filectx(self.filenode(), changeid=introrev)
1066 1077
def _parentfilectx(self, path, fileid, filelog):
    """create parent filectx keeping ancestry info for _adjustlinkrev()"""
    fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
    ourattrs = vars(self)
    if '_changeid' in ourattrs or '_changectx' in ourattrs:
        # self is bound to a changeset (probably fed explicitly): make the
        # new filectx remember that changeset as a descendant so that
        # _adjustlinkrev can later derive a correct linkrev from it.
        fctx._descendantrev = self.rev()
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    elif '_descendantrev' in ourattrs:
        # No changeset of our own; forward the descendant info we carry.
        fctx._descendantrev = self._descendantrev
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    return fctx
1082 1093
def parents(self):
    """Return the parent filectxs of this file revision."""
    path = self._path
    fl = self._filelog
    # gather the non-null filelog parents
    pl = [
        (path, node, fl)
        for node in fl.parents(self._filenode)
        if node != nullid
    ]

    renamed = fl.renamed(self._filenode)
    if renamed:
        # - In the simple rename case, both parent are nullid, pl is empty.
        # - In case of merge, only one of the parent is null id and should
        # be replaced with the rename information. This parent is -always-
        # the first one.
        #
        # As null id have always been filtered out in the previous list
        # comprehension, inserting to 0 will always result in "replacing
        # first nullid parent with rename information.
        pl.insert(0, (renamed[0], renamed[1], self._repo.file(renamed[0])))

    return [self._parentfilectx(p, n, l) for p, n, l in pl]
1102 1113
def p1(self):
    """Return the first parent filectx."""
    parents = self.parents()
    return parents[0]
1105 1116
def p2(self):
    """Return the second parent filectx (a null filectx when absent)."""
    parents = self.parents()
    if len(parents) == 2:
        return parents[1]
    # fileid=-1 produces a null file context for the missing parent
    return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1111 1122
def annotate(self, follow=False, skiprevs=None, diffopts=None):
    """Returns a list of annotateline objects for each line in the file

    - line.fctx is the filectx of the node where that line was last changed
    - line.lineno is the line number at the first appearance in the managed
      file
    - line.text is the data on that line (including newline character)

    :follow: when false, parents reached through a rename are dropped
    :skiprevs: forwarded to dagop.annotate
    :diffopts: forwarded to dagop.annotate
    """
    # memoize filelog lookups: parents() below may request the same path
    # repeatedly
    getlog = util.lrucachefunc(lambda x: self._repo.file(x))

    def parents(f):
        # Cut _descendantrev here to mitigate the penalty of lazy linkrev
        # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
        # from the topmost introrev (= srcrev) down to p.linkrev() if it
        # isn't an ancestor of the srcrev.
        f._changeid
        pl = f.parents()

        # Don't return renamed parents if we aren't following.
        if not follow:
            pl = [p for p in pl if p.path() == f.path()]

        # renamed filectx won't have a filelog yet, so set it
        # from the cache to save time
        for p in pl:
            if not '_filelog' in p.__dict__:
                p._filelog = getlog(p.path())

        return pl

    # use linkrev to find the first changeset where self appeared
    base = self.introfilectx()
    if getattr(base, '_ancestrycontext', None) is None:
        cl = self._repo.changelog
        if base.rev() is None:
            # wctx is not inclusive, but works because _ancestrycontext
            # is used to test filelog revisions
            ac = cl.ancestors(
                [p.rev() for p in base.parents()], inclusive=True
            )
        else:
            ac = cl.ancestors([base.rev()], inclusive=True)
        base._ancestrycontext = ac

    return dagop.annotate(
        base, parents, skiprevs=skiprevs, diffopts=diffopts
    )
1159 1170
def ancestors(self, followfirst=False):
    """Yield ancestor filectxs, highest (linkrev, filenode) key first.

    When followfirst is true, only the first parent of each visited
    context is considered.
    """
    cut = 1 if followfirst else None
    pending = {}
    ctx = self
    while True:
        for parent in ctx.parents()[:cut]:
            pending[(parent.linkrev(), parent.filenode())] = parent
        if not pending:
            return
        # always continue from the highest remaining key
        ctx = pending.pop(max(pending))
        yield ctx
1175 1186
def decodeddata(self):
    """Returns `data()` after running repository decoding filters.

    This is often equivalent to how the data would be expressed on disk.
    """
    path = self.path()
    return self._repo.wwritedata(path, self.data())
1182 1193
1183 1194
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the revision must be provided
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog

        # only pre-seed the caches that were explicitly supplied; the
        # rest are computed lazily by propertycache on the base class
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # changectx for this file revision, derived from the linkrev
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Return the raw revlog data for this file node (no filtering)."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honouring the censor policy.

        Raises error.Abort for a censored node unless censor.policy is
        set to "ignore", in which case an empty string is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """Size of this file revision, as reported by the filelog."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        # If either parent already holds this very file node, the rename
        # does not belong to this changeset.
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        """Return filectxs for the filelog children of this file node."""
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1307 1318
1308 1319
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        # not committed yet: no revision number or node
        self._rev = None
        self._node = None
        self._text = text
        # date/user/status fall back to propertycache defaults when unset
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        # rendered as "<first parent>+" to mark uncommitted state
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # default: full repository status, computed lazily
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # devel.default-date overrides the current time when configured
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Sorted list of modified, added and removed files."""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # aliases matching the files* accessor API used elsewhere
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft  # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # parents first, then changelog ancestors of those parents
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1461 1472
1462 1473
1463 1474 class workingctx(committablectx):
1464 1475 """A workingctx object makes access to data related to
1465 1476 the current working directory convenient.
1466 1477 date - any valid date string or (unixtime, offset), or None.
1467 1478 user - username string, or None.
1468 1479 extra - a dictionary of extra values, or None.
1469 1480 changes - a list of file lists as returned by localrepo.status()
1470 1481 or None to use the repository status.
1471 1482 """
1472 1483
1473 1484 def __init__(
1474 1485 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1475 1486 ):
1476 1487 branch = None
1477 1488 if not extra or b'branch' not in extra:
1478 1489 try:
1479 1490 branch = repo.dirstate.branch()
1480 1491 except UnicodeDecodeError:
1481 1492 raise error.Abort(_(b'branch name not in UTF-8!'))
1482 1493 super(workingctx, self).__init__(
1483 1494 repo, text, user, date, extra, changes, branch=branch
1484 1495 )
1485 1496
1486 1497 def __iter__(self):
1487 1498 d = self._repo.dirstate
1488 1499 for f in d:
1489 1500 if d[f] != b'r':
1490 1501 yield f
1491 1502
1492 1503 def __contains__(self, key):
1493 1504 return self._repo.dirstate[key] not in b"?r"
1494 1505
1495 1506 def hex(self):
1496 1507 return wdirhex
1497 1508
1498 1509 @propertycache
1499 1510 def _parents(self):
1500 1511 p = self._repo.dirstate.parents()
1501 1512 if p[1] == nullid:
1502 1513 p = p[:-1]
1503 1514 # use unfiltered repo to delay/avoid loading obsmarkers
1504 1515 unfi = self._repo.unfiltered()
1505 1516 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1506 1517
1507 1518 def _fileinfo(self, path):
1508 1519 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1509 1520 self._manifest
1510 1521 return super(workingctx, self)._fileinfo(path)
1511 1522
1512 1523 def _buildflagfunc(self):
1513 1524 # Create a fallback function for getting file flags when the
1514 1525 # filesystem doesn't support them
1515 1526
1516 1527 copiesget = self._repo.dirstate.copies().get
1517 1528 parents = self.parents()
1518 1529 if len(parents) < 2:
1519 1530 # when we have one parent, it's easy: copy from parent
1520 1531 man = parents[0].manifest()
1521 1532
1522 1533 def func(f):
1523 1534 f = copiesget(f, f)
1524 1535 return man.flags(f)
1525 1536
1526 1537 else:
1527 1538 # merges are tricky: we try to reconstruct the unstored
1528 1539 # result from the merge (issue1802)
1529 1540 p1, p2 = parents
1530 1541 pa = p1.ancestor(p2)
1531 1542 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1532 1543
1533 1544 def func(f):
1534 1545 f = copiesget(f, f) # may be wrong for merges with copies
1535 1546 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1536 1547 if fl1 == fl2:
1537 1548 return fl1
1538 1549 if fl1 == fla:
1539 1550 return fl2
1540 1551 if fl2 == fla:
1541 1552 return fl1
1542 1553 return b'' # punt for conflicts
1543 1554
1544 1555 return func
1545 1556
1546 1557 @propertycache
1547 1558 def _flagfunc(self):
1548 1559 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1549 1560
1550 1561 def flags(self, path):
1551 1562 if '_manifest' in self.__dict__:
1552 1563 try:
1553 1564 return self._manifest.flags(path)
1554 1565 except KeyError:
1555 1566 return b''
1556 1567
1557 1568 try:
1558 1569 return self._flagfunc(path)
1559 1570 except OSError:
1560 1571 return b''
1561 1572
1562 1573 def filectx(self, path, filelog=None):
1563 1574 """get a file context from the working directory"""
1564 1575 return workingfilectx(
1565 1576 self._repo, path, workingctx=self, filelog=filelog
1566 1577 )
1567 1578
1568 1579 def dirty(self, missing=False, merge=True, branch=True):
1569 1580 b"check whether a working directory is modified"
1570 1581 # check subrepos first
1571 1582 for s in sorted(self.substate):
1572 1583 if self.sub(s).dirty(missing=missing):
1573 1584 return True
1574 1585 # check current working dir
1575 1586 return (
1576 1587 (merge and self.p2())
1577 1588 or (branch and self.branch() != self.p1().branch())
1578 1589 or self.modified()
1579 1590 or self.added()
1580 1591 or self.removed()
1581 1592 or (missing and self.deleted())
1582 1593 )
1583 1594
1584 1595 def add(self, list, prefix=b""):
1585 1596 with self._repo.wlock():
1586 1597 ui, ds = self._repo.ui, self._repo.dirstate
1587 1598 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1588 1599 rejected = []
1589 1600 lstat = self._repo.wvfs.lstat
1590 1601 for f in list:
1591 1602 # ds.pathto() returns an absolute file when this is invoked from
1592 1603 # the keyword extension. That gets flagged as non-portable on
1593 1604 # Windows, since it contains the drive letter and colon.
1594 1605 scmutil.checkportable(ui, os.path.join(prefix, f))
1595 1606 try:
1596 1607 st = lstat(f)
1597 1608 except OSError:
1598 1609 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1599 1610 rejected.append(f)
1600 1611 continue
1601 1612 limit = ui.configbytes(b'ui', b'large-file-limit')
1602 1613 if limit != 0 and st.st_size > limit:
1603 1614 ui.warn(
1604 1615 _(
1605 1616 b"%s: up to %d MB of RAM may be required "
1606 1617 b"to manage this file\n"
1607 1618 b"(use 'hg revert %s' to cancel the "
1608 1619 b"pending addition)\n"
1609 1620 )
1610 1621 % (f, 3 * st.st_size // 1000000, uipath(f))
1611 1622 )
1612 1623 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1613 1624 ui.warn(
1614 1625 _(
1615 1626 b"%s not added: only files and symlinks "
1616 1627 b"supported currently\n"
1617 1628 )
1618 1629 % uipath(f)
1619 1630 )
1620 1631 rejected.append(f)
1621 1632 elif ds[f] in b'amn':
1622 1633 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1623 1634 elif ds[f] == b'r':
1624 1635 ds.normallookup(f)
1625 1636 else:
1626 1637 ds.add(f)
1627 1638 return rejected
1628 1639
1629 1640 def forget(self, files, prefix=b""):
1630 1641 with self._repo.wlock():
1631 1642 ds = self._repo.dirstate
1632 1643 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1633 1644 rejected = []
1634 1645 for f in files:
1635 1646 if f not in ds:
1636 1647 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1637 1648 rejected.append(f)
1638 1649 elif ds[f] != b'a':
1639 1650 ds.remove(f)
1640 1651 else:
1641 1652 ds.drop(f)
1642 1653 return rejected
1643 1654
1644 1655 def copy(self, source, dest):
1645 1656 try:
1646 1657 st = self._repo.wvfs.lstat(dest)
1647 1658 except OSError as err:
1648 1659 if err.errno != errno.ENOENT:
1649 1660 raise
1650 1661 self._repo.ui.warn(
1651 1662 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1652 1663 )
1653 1664 return
1654 1665 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1655 1666 self._repo.ui.warn(
1656 1667 _(b"copy failed: %s is not a file or a symbolic link\n")
1657 1668 % self._repo.dirstate.pathto(dest)
1658 1669 )
1659 1670 else:
1660 1671 with self._repo.wlock():
1661 1672 ds = self._repo.dirstate
1662 1673 if ds[dest] in b'?':
1663 1674 ds.add(dest)
1664 1675 elif ds[dest] in b'r':
1665 1676 ds.normallookup(dest)
1666 1677 ds.copy(source, dest)
1667 1678
1668 1679 def match(
1669 1680 self,
1670 1681 pats=None,
1671 1682 include=None,
1672 1683 exclude=None,
1673 1684 default=b'glob',
1674 1685 listsubrepos=False,
1675 1686 badfn=None,
1676 1687 ):
1677 1688 r = self._repo
1678 1689
1679 1690 # Only a case insensitive filesystem needs magic to translate user input
1680 1691 # to actual case in the filesystem.
1681 1692 icasefs = not util.fscasesensitive(r.root)
1682 1693 return matchmod.match(
1683 1694 r.root,
1684 1695 r.getcwd(),
1685 1696 pats,
1686 1697 include,
1687 1698 exclude,
1688 1699 default,
1689 1700 auditor=r.auditor,
1690 1701 ctx=self,
1691 1702 listsubrepos=listsubrepos,
1692 1703 badfn=badfn,
1693 1704 icasefs=icasefs,
1694 1705 )
1695 1706
1696 1707 def _filtersuspectsymlink(self, files):
1697 1708 if not files or self._repo.dirstate._checklink:
1698 1709 return files
1699 1710
1700 1711 # Symlink placeholders may get non-symlink-like contents
1701 1712 # via user error or dereferencing by NFS or Samba servers,
1702 1713 # so we filter out any placeholders that don't look like a
1703 1714 # symlink
1704 1715 sane = []
1705 1716 for f in files:
1706 1717 if self.flags(f) == b'l':
1707 1718 d = self[f].data()
1708 1719 if (
1709 1720 d == b''
1710 1721 or len(d) >= 1024
1711 1722 or b'\n' in d
1712 1723 or stringutil.binary(d)
1713 1724 ):
1714 1725 self._repo.ui.debug(
1715 1726 b'ignoring suspect symlink placeholder "%s"\n' % f
1716 1727 )
1717 1728 continue
1718 1729 sane.append(f)
1719 1730 return sane
1720 1731
1721 1732 def _checklookup(self, files):
1722 1733 # check for any possibly clean files
1723 1734 if not files:
1724 1735 return [], [], []
1725 1736
1726 1737 modified = []
1727 1738 deleted = []
1728 1739 fixup = []
1729 1740 pctx = self._parents[0]
1730 1741 # do a full compare of any files that might have changed
1731 1742 for f in sorted(files):
1732 1743 try:
1733 1744 # This will return True for a file that got replaced by a
1734 1745 # directory in the interim, but fixing that is pretty hard.
1735 1746 if (
1736 1747 f not in pctx
1737 1748 or self.flags(f) != pctx.flags(f)
1738 1749 or pctx[f].cmp(self[f])
1739 1750 ):
1740 1751 modified.append(f)
1741 1752 else:
1742 1753 fixup.append(f)
1743 1754 except (IOError, OSError):
1744 1755 # A file become inaccessible in between? Mark it as deleted,
1745 1756 # matching dirstate behavior (issue5584).
1746 1757 # The dirstate has more complex behavior around whether a
1747 1758 # missing file matches a directory, etc, but we don't need to
1748 1759 # bother with that: if f has made it to this point, we're sure
1749 1760 # it's in the dirstate.
1750 1761 deleted.append(f)
1751 1762
1752 1763 return modified, deleted, fixup
1753 1764
1754 1765 def _poststatusfixup(self, status, fixup):
1755 1766 """update dirstate for files that are actually clean"""
1756 1767 poststatus = self._repo.postdsstatus()
1757 1768 if fixup or poststatus:
1758 1769 try:
1759 1770 oldid = self._repo.dirstate.identity()
1760 1771
1761 1772 # updating the dirstate is optional
1762 1773 # so we don't wait on the lock
1763 1774 # wlock can invalidate the dirstate, so cache normal _after_
1764 1775 # taking the lock
1765 1776 with self._repo.wlock(False):
1766 1777 if self._repo.dirstate.identity() == oldid:
1767 1778 if fixup:
1768 1779 normal = self._repo.dirstate.normal
1769 1780 for f in fixup:
1770 1781 normal(f)
1771 1782 # write changes out explicitly, because nesting
1772 1783 # wlock at runtime may prevent 'wlock.release()'
1773 1784 # after this block from doing so for subsequent
1774 1785 # changing files
1775 1786 tr = self._repo.currenttransaction()
1776 1787 self._repo.dirstate.write(tr)
1777 1788
1778 1789 if poststatus:
1779 1790 for ps in poststatus:
1780 1791 ps(self, status)
1781 1792 else:
1782 1793 # in this case, writing changes out breaks
1783 1794 # consistency, because .hg/dirstate was
1784 1795 # already changed simultaneously after last
1785 1796 # caching (see also issue5584 for detail)
1786 1797 self._repo.ui.debug(
1787 1798 b'skip updating dirstate: identity mismatch\n'
1788 1799 )
1789 1800 except error.LockError:
1790 1801 pass
1791 1802 finally:
1792 1803 # Even if the wlock couldn't be grabbed, clear out the list.
1793 1804 self._repo.clearpostdsstatus()
1794 1805
1795 1806 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1796 1807 '''Gets the status from the dirstate -- internal use only.'''
1797 1808 subrepos = []
1798 1809 if b'.hgsub' in self:
1799 1810 subrepos = sorted(self.substate)
1800 1811 cmp, s = self._repo.dirstate.status(
1801 1812 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1802 1813 )
1803 1814
1804 1815 # check for any possibly clean files
1805 1816 fixup = []
1806 1817 if cmp:
1807 1818 modified2, deleted2, fixup = self._checklookup(cmp)
1808 1819 s.modified.extend(modified2)
1809 1820 s.deleted.extend(deleted2)
1810 1821
1811 1822 if fixup and clean:
1812 1823 s.clean.extend(fixup)
1813 1824
1814 1825 self._poststatusfixup(s, fixup)
1815 1826
1816 1827 if match.always():
1817 1828 # cache for performance
1818 1829 if s.unknown or s.ignored or s.clean:
1819 1830 # "_status" is cached with list*=False in the normal route
1820 1831 self._status = scmutil.status(
1821 1832 s.modified, s.added, s.removed, s.deleted, [], [], []
1822 1833 )
1823 1834 else:
1824 1835 self._status = s
1825 1836
1826 1837 return s
1827 1838
1828 1839 @propertycache
1829 1840 def _copies(self):
1830 1841 p1copies = {}
1831 1842 p2copies = {}
1832 1843 parents = self._repo.dirstate.parents()
1833 1844 p1manifest = self._repo[parents[0]].manifest()
1834 1845 p2manifest = self._repo[parents[1]].manifest()
1835 1846 changedset = set(self.added()) | set(self.modified())
1836 1847 narrowmatch = self._repo.narrowmatch()
1837 1848 for dst, src in self._repo.dirstate.copies().items():
1838 1849 if dst not in changedset or not narrowmatch(dst):
1839 1850 continue
1840 1851 if src in p1manifest:
1841 1852 p1copies[dst] = src
1842 1853 elif src in p2manifest:
1843 1854 p2copies[dst] = src
1844 1855 return p1copies, p2copies
1845 1856
1846 1857 @propertycache
1847 1858 def _manifest(self):
1848 1859 """generate a manifest corresponding to the values in self._status
1849 1860
1850 1861 This reuse the file nodeid from parent, but we use special node
1851 1862 identifiers for added and modified files. This is used by manifests
1852 1863 merge to see that files are different and by update logic to avoid
1853 1864 deleting newly added files.
1854 1865 """
1855 1866 return self._buildstatusmanifest(self._status)
1856 1867
1857 1868 def _buildstatusmanifest(self, status):
1858 1869 """Builds a manifest that includes the given status results."""
1859 1870 parents = self.parents()
1860 1871
1861 1872 man = parents[0].manifest().copy()
1862 1873
1863 1874 ff = self._flagfunc
1864 1875 for i, l in (
1865 1876 (addednodeid, status.added),
1866 1877 (modifiednodeid, status.modified),
1867 1878 ):
1868 1879 for f in l:
1869 1880 man[f] = i
1870 1881 try:
1871 1882 man.setflag(f, ff(f))
1872 1883 except OSError:
1873 1884 pass
1874 1885
1875 1886 for f in status.deleted + status.removed:
1876 1887 if f in man:
1877 1888 del man[f]
1878 1889
1879 1890 return man
1880 1891
1881 1892 def _buildstatus(
1882 1893 self, other, s, match, listignored, listclean, listunknown
1883 1894 ):
1884 1895 """build a status with respect to another context
1885 1896
1886 1897 This includes logic for maintaining the fast path of status when
1887 1898 comparing the working directory against its parent, which is to skip
1888 1899 building a new manifest if self (working directory) is not comparing
1889 1900 against its parent (repo['.']).
1890 1901 """
1891 1902 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1892 1903 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1893 1904 # might have accidentally ended up with the entire contents of the file
1894 1905 # they are supposed to be linking to.
1895 1906 s.modified[:] = self._filtersuspectsymlink(s.modified)
1896 1907 if other != self._repo[b'.']:
1897 1908 s = super(workingctx, self)._buildstatus(
1898 1909 other, s, match, listignored, listclean, listunknown
1899 1910 )
1900 1911 return s
1901 1912
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            # NOTE(review): mutates the caller-supplied matcher in place by
            # replacing its ``bad`` callback; callers get the same object back.
            match.bad = bad
        return match
1924 1935
1925 1936 def walk(self, match):
1926 1937 '''Generates matching file names.'''
1927 1938 return sorted(
1928 1939 self._repo.dirstate.walk(
1929 1940 self._repo.narrowmatch(match),
1930 1941 subrepos=sorted(self.substate),
1931 1942 unknown=True,
1932 1943 ignored=False,
1933 1944 )
1934 1945 )
1935 1946
1936 1947 def matches(self, match):
1937 1948 match = self._repo.narrowmatch(match)
1938 1949 ds = self._repo.dirstate
1939 1950 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1940 1951
    def markcommitted(self, node):
        """Update the dirstate to reflect that this context was committed.

        Modified/added files become 'normal', removed files are dropped, and
        the dirstate parent is moved to ``node``.
        """
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
1955 1966
1956 1967
class committablefilectx(basefilectx):
    """Shared behaviour for file contexts that can be committed.

    Concrete subclasses are e.g. ``workingfilectx`` and ``memfilectx``.
    """

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # Only pre-populate the cached attributes when the caller supplied
        # concrete values; otherwise they are computed lazily on access.
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # Tied to self._changectx no matter if the file is modified or not.
        return self.rev()

    def renamed(self):
        source = self.copysource()
        if not source:
            return None
        firstparentman = self._changectx._parents[0]._manifest
        return source, firstparentman.get(source, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def nodefor(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            entries = [renamed + (None,)]
        else:
            entries = [(path, nodefor(parentctxs[0], path), filelog)]
        entries.extend(
            (path, nodefor(pctx, path), filelog) for pctx in parentctxs[1:]
        )

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in entries
            if n != nullid
        ]

    def children(self):
        return []
2015 2026
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # Lazily bind to a fresh working-directory context when none was
        # supplied at construction time.
        return workingctx(self._repo)

    def data(self):
        # Raw file content as read from the working directory.
        return self._repo.wread(self._path)

    def copysource(self):
        # Copy source recorded in the dirstate (falsy when not a copy).
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        t, tz = self._changectx.date()
        try:
            # Prefer the on-disk mtime, keeping the changectx timezone.
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # File is gone from disk: fall back to the changectx date.
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            # Remove the first file-or-symlink ancestor blocking the path,
            # walking from the deepest directory upwards.
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
2109 2120
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        """Set the context this overlay applies on top of."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def data(self, path):
        """Return the cached data for ``path``, falling back to the
        wrapped context when only flags (or nothing) were overlaid."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # Dirty and still present in the parent -> modified.
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # Dirty and absent from the parent -> added.
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # Marked non-existing but present in the parent -> removed.
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        # BUG FIX: the wrapped context lives on self, not on the repo;
        # ``self._repo._wrappedctx`` raised AttributeError.
        copies = self._wrappedctx.p1copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        # BUG FIX: same as p1copies — read the wrapped ctx from self.
        copies = self._wrappedctx.p2copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                # BUG FIX: this ctx has no ``_path`` attribute; using it
                # here masked the ProgrammingError with an AttributeError.
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        """Record new content (and flags) for ``path`` in the cache."""
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                # BUG FIX: use the ``path`` argument; ``self._path`` does
                # not exist on this class.
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        # Reset the write-back cache entirely.
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [self.p1().rev()],
            scmutil.matchfiles(self.repo(), self._cache.keys()),
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )
2504 2515
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """Return True when content differs from ``fctx``."""
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # Symlink-following existence is delegated via lexists, whose
        # backing implementation already follows links.
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # Nothing to audit: writes never touch the filesystem.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is accepted for interface parity but is
        # meaningless for the in-memory cache.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # No on-disk state can conflict with an in-memory write.
        pass
2560 2571
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            changed = self._changedset
            cleanfiles = [f for f in self._manifest if f not in changed]
        else:
            cleanfiles = []
        status = self._status
        return scmutil.status(
            list(filter(match, status.modified)),
            list(filter(match, status.added)),
            list(filter(match, status.removed)),
            [],
            [],
            [],
            cleanfiles,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        status = self._status
        return set(status.modified) | set(status.added) | set(status.removed)
2605 2616
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: the cache hit is the common case after the first call.
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
2621 2632
2622 2633
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=fctx.copysource(),
        )

    return getfilectx
2644 2655
2645 2656
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # Deleted file: memctx expects None for removals.
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2668 2679
2669 2680
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=False,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        self._rev = None
        self._node = None
        # Normalize missing parents (None) to the null id.
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            # Let the editor callback rewrite the message, and persist it so
            # it can be recovered if the commit fails.
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: updated content.
                modified.append(f)
            else:
                # filectxfn returned None: the file was removed.
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2797 2808
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Symlink wins over exec when both are (erroneously) set.
        self._flags = b'l' if islink else (b'x' if isexec else b'')
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        """Return True when content differs from ``fctx``."""
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2849 2860
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'originalctx' is the original revision whose manifest
    we're reusing, 'parents' is a sequence of two parent revision
    identifiers (pass None for every missing parent), 'text' is the commit
    message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=False,
    ):
        if text is None:
            # Reuse the original commit message when none is supplied.
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # Not yet committed: no revision number or node id assigned.
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            # Resolve identifiers to changectx objects, dropping
            # placeholder None entries.
            parents = [repo[p] for p in parents if p is not None]
        # Work on a copy and pad with the null revision so that
        # self._parents always holds exactly two entries.
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            # Let the caller-provided editor rewrite the commit message,
            # and persist it so it can be recovered if the commit fails.
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node id of the reused manifest."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        # Manifest context looked up from the reused manifest node.
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        """Delegate file access to the original context."""
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        # The manifest is shared with the original context.
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        # Classify each touched file against the parent manifests:
        # unmanaged -> added, present in this ctx -> modified, else removed.
        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2958 2969
2959 2970
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._path = path
        self._repo = repo

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so the
        # disk-backed fast path is only safe when neither side is a symlink.
        any_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        fast_path_ok = (
            not any_symlink
            and isinstance(fctx, workingfilectx)
            and self._repo
        )
        if fast_path_ok:
            # Fast path for merge when both sides are disk-backed. Note
            # that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            same = filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
            return not same
        return self.data() != fctx.data()

    def path(self):
        """Return the on-disk path this context wraps."""
        return self._path

    def flags(self):
        """Arbitrary files carry no manifest flags."""
        return b''

    def data(self):
        """Return the raw file content from disk."""
        return util.readfile(self._path)

    def decodeddata(self):
        """Return the file content, read in binary mode."""
        with open(self._path, b"rb") as fp:
            return fp.read()

    def remove(self):
        """Delete the underlying file from disk."""
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        """Write ``data`` to the underlying file; flags are unsupported."""
        assert not flags
        with open(self._path, b"wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now