# Provenance note: this file corresponds to Mercurial changeset
# r35908:44bc37d2 (branch: default), "context: drop deprecated methods (API)"
# by Matt Harbison. The text below was recovered from a diff-viewer rendering
# of @@ -1,2790 +1,2749 @@ of mercurial/context.py.
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import re
import stat

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirnodes,
    wdirrev,
)
from .thirdparty import (
    attr,
)
from . import (
    encoding,
    error,
    fileset,
    match as matchmod,
    mdiff,
    obsolete as obsmod,
    obsutil,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    revlog,
    scmutil,
    sparse,
    subrepo,
    util,
)

propertycache = util.propertycache

# matches any byte outside the printable ASCII range; used to decide whether
# a 20-byte lookup key is a binary nodeid or readable text
nonascii = re.compile(r'[^\x21-\x7f]').search
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through returns it unchanged, so
        # basectx(repo, ctx) is an identity operation for subclasses too.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # prefer whichever manifest form is already cached on the instance
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
430 389
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = unfilteredrepo[changeid]

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
455 414
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if (changeid == '.'
                or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if (repo.local()
                and changeid in repo.unfiltered().dirstate.parents()):
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
732 691
733 692 class basefilectx(object):
734 693 """A filecontext object represents the common logic for its children:
735 694 filectx: read-only access to a filerevision that is already present
736 695 in the repo,
737 696 workingfilectx: a filecontext that represents files from the working
738 697 directory,
739 698 memfilectx: a filecontext that represents files in-memory,
740 699 overlayfilectx: duplicate another filecontext with some fields overridden.
741 700 """
742 701 @propertycache
743 702 def _filelog(self):
744 703 return self._repo.file(self._path)
745 704
746 705 @propertycache
747 706 def _changeid(self):
748 707 if r'_changeid' in self.__dict__:
749 708 return self._changeid
750 709 elif r'_changectx' in self.__dict__:
751 710 return self._changectx.rev()
752 711 elif r'_descendantrev' in self.__dict__:
753 712 # this file context was created from a revision with a known
754 713 # descendant, we can (lazily) correct for linkrev aliases
755 714 return self._adjustlinkrev(self._descendantrev)
756 715 else:
757 716 return self._filelog.linkrev(self._filerev)
758 717
759 718 @propertycache
760 719 def _filenode(self):
761 720 if r'_fileid' in self.__dict__:
762 721 return self._filelog.lookup(self._fileid)
763 722 else:
764 723 return self._changectx.filenode(self._path)
765 724
766 725 @propertycache
767 726 def _filerev(self):
768 727 return self._filelog.rev(self._filenode)
769 728
770 729 @propertycache
771 730 def _repopath(self):
772 731 return self._path
773 732
774 733 def __nonzero__(self):
775 734 try:
776 735 self._filenode
777 736 return True
778 737 except error.LookupError:
779 738 # file is missing
780 739 return False
781 740
782 741 __bool__ = __nonzero__
783 742
784 743 def __bytes__(self):
785 744 try:
786 745 return "%s@%s" % (self.path(), self._changectx)
787 746 except error.LookupError:
788 747 return "%s@???" % self.path()
789 748
790 749 __str__ = encoding.strmethod(__bytes__)
791 750
792 751 def __repr__(self):
793 752 return "<%s %s>" % (type(self).__name__, str(self))
794 753
795 754 def __hash__(self):
796 755 try:
797 756 return hash((self._path, self._filenode))
798 757 except AttributeError:
799 758 return id(self)
800 759
801 760 def __eq__(self, other):
802 761 try:
803 762 return (type(self) == type(other) and self._path == other._path
804 763 and self._filenode == other._filenode)
805 764 except AttributeError:
806 765 return False
807 766
808 767 def __ne__(self, other):
809 768 return not (self == other)
810 769
811 770 def filerev(self):
812 771 return self._filerev
813 772 def filenode(self):
814 773 return self._filenode
815 774 @propertycache
816 775 def _flags(self):
817 776 return self._changectx.flags(self._path)
818 777 def flags(self):
819 778 return self._flags
820 779 def filelog(self):
821 780 return self._filelog
822 781 def rev(self):
823 782 return self._changeid
824 783 def linkrev(self):
825 784 return self._filelog.linkrev(self._filerev)
826 785 def node(self):
827 786 return self._changectx.node()
828 787 def hex(self):
829 788 return self._changectx.hex()
830 789 def user(self):
831 790 return self._changectx.user()
832 791 def date(self):
833 792 return self._changectx.date()
834 793 def files(self):
835 794 return self._changectx.files()
836 795 def description(self):
837 796 return self._changectx.description()
838 797 def branch(self):
839 798 return self._changectx.branch()
840 799 def extra(self):
841 800 return self._changectx.extra()
842 801 def phase(self):
843 802 return self._changectx.phase()
844 803 def phasestr(self):
845 804 return self._changectx.phasestr()
846 805 def obsolete(self):
847 806 return self._changectx.obsolete()
848 807 def instabilities(self):
849 808 return self._changectx.instabilities()
850 809 def manifest(self):
851 810 return self._changectx.manifest()
852 811 def changectx(self):
853 812 return self._changectx
854 813 def renamed(self):
855 814 return self._copied
856 815 def repo(self):
857 816 return self._repo
858 817 def size(self):
859 818 return len(self.data())
860 819
861 820 def path(self):
862 821 return self._path
863 822
864 823 def isbinary(self):
865 824 try:
866 825 return util.binary(self.data())
867 826 except IOError:
868 827 return False
869 828 def isexec(self):
870 829 return 'x' in self.flags()
871 830 def islink(self):
872 831 return 'l' in self.flags()
873 832
874 833 def isabsent(self):
875 834 """whether this filectx represents a file not in self._changectx
876 835
877 836 This is mainly for merge code to detect change/delete conflicts. This is
878 837 expected to be True for all subclasses of basectx."""
879 838 return False
880 839
881 840 _customcmp = False
882 841 def cmp(self, fctx):
883 842 """compare with other file context
884 843
885 844 returns True if different than fctx.
886 845 """
887 846 if fctx._customcmp:
888 847 return fctx.cmp(self)
889 848
890 849 if (fctx._filenode is None
891 850 and (self._repo._encodefilterpats
892 851 # if file data starts with '\1\n', empty metadata block is
893 852 # prepended, which adds 4 bytes to filelog.size().
894 853 or self.size() - 4 == fctx.size())
895 854 or self.size() == fctx.size()):
896 855 return self._filelog.cmp(self._filenode, fctx.data())
897 856
898 857 return True
899 858
900 859 def _adjustlinkrev(self, srcrev, inclusive=False):
901 860 """return the first ancestor of <srcrev> introducing <fnode>
902 861
903 862 If the linkrev of the file revision does not point to an ancestor of
904 863 srcrev, we'll walk down the ancestors until we find one introducing
905 864 this file revision.
906 865
907 866 :srcrev: the changeset revision we search ancestors from
908 867 :inclusive: if true, the src revision will also be checked
909 868 """
910 869 repo = self._repo
911 870 cl = repo.unfiltered().changelog
912 871 mfl = repo.manifestlog
913 872 # fetch the linkrev
914 873 lkr = self.linkrev()
915 874 # hack to reuse ancestor computation when searching for renames
916 875 memberanc = getattr(self, '_ancestrycontext', None)
917 876 iteranc = None
918 877 if srcrev is None:
919 878 # wctx case, used by workingfilectx during mergecopy
920 879 revs = [p.rev() for p in self._repo[None].parents()]
921 880 inclusive = True # we skipped the real (revless) source
922 881 else:
923 882 revs = [srcrev]
924 883 if memberanc is None:
925 884 memberanc = iteranc = cl.ancestors(revs, lkr,
926 885 inclusive=inclusive)
927 886 # check if this linkrev is an ancestor of srcrev
928 887 if lkr not in memberanc:
929 888 if iteranc is None:
930 889 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
931 890 fnode = self._filenode
932 891 path = self._path
933 892 for a in iteranc:
934 893 ac = cl.read(a) # get changeset data (we avoid object creation)
935 894 if path in ac[3]: # checking the 'files' field.
936 895 # The file has been touched, check if the content is
937 896 # similar to the one we search for.
938 897 if fnode == mfl[ac[0]].readfast().get(path):
939 898 return a
940 899 # In theory, we should never get out of that loop without a result.
941 900 # But if manifest uses a buggy file revision (not children of the
942 901 # one it replaces) we could. Such a buggy situation will likely
943 902 # result is crash somewhere else at to some point.
944 903 return lkr
945 904
946 905 def introrev(self):
947 906 """return the rev of the changeset which introduced this file revision
948 907
949 908 This method is different from linkrev because it take into account the
950 909 changeset the filectx was created from. It ensures the returned
951 910 revision is one of its ancestors. This prevents bugs from
952 911 'linkrev-shadowing' when a file revision is used by multiple
953 912 changesets.
954 913 """
955 914 lkr = self.linkrev()
956 915 attrs = vars(self)
957 916 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
958 917 if noctx or self.rev() == lkr:
959 918 return self.linkrev()
960 919 return self._adjustlinkrev(self.rev(), inclusive=True)
961 920
962 921 def introfilectx(self):
963 922 """Return filectx having identical contents, but pointing to the
964 923 changeset revision where this filectx was introduced"""
965 924 introrev = self.introrev()
966 925 if self.rev() == introrev:
967 926 return self
968 927 return self.filectx(self.filenode(), changeid=introrev)
969 928
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()

        The returned filectx inherits either a descendant revision or the
        cached ancestry context from self, so a later linkrev adjustment
        does not have to recompute the ancestry from scratch.
        """
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
985 944
986 945 def parents(self):
987 946 _path = self._path
988 947 fl = self._filelog
989 948 parents = self._filelog.parents(self._filenode)
990 949 pl = [(_path, node, fl) for node in parents if node != nullid]
991 950
992 951 r = fl.renamed(self._filenode)
993 952 if r:
994 953 # - In the simple rename case, both parent are nullid, pl is empty.
995 954 # - In case of merge, only one of the parent is null id and should
996 955 # be replaced with the rename information. This parent is -always-
997 956 # the first one.
998 957 #
999 958 # As null id have always been filtered out in the previous list
1000 959 # comprehension, inserting to 0 will always result in "replacing
1001 960 # first nullid parent with rename information.
1002 961 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1003 962
1004 963 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1005 964
    def p1(self):
        """Return the first parent filectx."""
        return self.parents()[0]
1008 967
1009 968 def p2(self):
1010 969 p = self.parents()
1011 970 if len(p) == 2:
1012 971 return p[1]
1013 972 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1014 973
    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of (annotateline, line) for each line
        in the file, where the annotateline's fctx field is the filectx of
        the node where that line was last changed; if linenumber parameter
        is true, its lineno field is the line number at the first
        appearance in the managed file, otherwise lineno has a fixed value
        of False.

        :follow: if true, also annotate across renames
        :skiprevs: revisions whose changes are blamed on their parents
        :diffopts: diff options forwarded to the block-matching code
        '''

        def lines(text):
            # count lines, including a trailing line without a final newline
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([annotateline(fctx=rev, lineno=i)
                         for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([annotateline(fctx=rev)] * lines(text), text)

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        visit = [base]
        pcache = {}
        # needed[f]: number of children still waiting on f's annotation;
        # lets the 2nd pass free hist entries as early as possible
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                for p in pl:
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                del pcache[f]

        return pycompat.ziplist(hist[base][0], hist[base][1].splitlines(True))
1125 1084
1126 1085 def ancestors(self, followfirst=False):
1127 1086 visit = {}
1128 1087 c = self
1129 1088 if followfirst:
1130 1089 cut = 1
1131 1090 else:
1132 1091 cut = None
1133 1092
1134 1093 while True:
1135 1094 for parent in c.parents()[:cut]:
1136 1095 visit[(parent.linkrev(), parent.filenode())] = parent
1137 1096 if not visit:
1138 1097 break
1139 1098 c = visit.pop(max(visit))
1140 1099 yield c
1141 1100
1142 1101 def decodeddata(self):
1143 1102 """Returns `data()` after running repository decoding filters.
1144 1103
1145 1104 This is often equivalent to how the data would be expressed on disk.
1146 1105 """
1147 1106 return self._repo.wwritedata(self.path(), self.data())
1148 1107
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """One annotated line: which filectx introduced it and where."""
    # filectx of the revision this line is attributed to
    fctx = attr.ib()
    # line number at first appearance, or False when not tracked
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
1155 1114
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    Each annotate datum is a ([annotateline, ...], text) pair as produced by
    annotate()'s decorate helpers.

    See test-annotate.py for unit tests.
    '''
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        # only lines still blamed on the child are replaced
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak], skip=True)
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
1216 1175
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only store what was explicitly provided; everything else is
        # derived lazily through propertycaches on basefilectx
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """Return the revision data without flag processing (raw=True)."""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy.

        For a censored node, returns '' when censor.policy is 'ignore',
        and aborts otherwise.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # size as recorded in the filelog
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # a parent already carries this very file revision:
                    # no rename to report for this changeset
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1322 1281
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # user/date/status are only stored when supplied; otherwise they
        # are computed lazily by the propertycaches below
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # a committable context displays as its first parent plus '+'
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # default status: working directory against its first parent
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin an otherwise "now" date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        # an uncommitted context has no subrepo revision yet
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context carries the bookmarks of all its parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer the cached manifest when one has been built already
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # yield direct parents first, then their changelog ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1523 1482
1524 1483 class workingctx(committablectx):
1525 1484 """A workingctx object makes access to data related to
1526 1485 the current working directory convenient.
1527 1486 date - any valid date string or (unixtime, offset), or None.
1528 1487 user - username string, or None.
1529 1488 extra - a dictionary of extra values, or None.
1530 1489 changes - a list of file lists as returned by localrepo.status()
1531 1490 or None to use the repository status.
1532 1491 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # all bookkeeping is delegated to committablectx
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1536 1495
1537 1496 def __iter__(self):
1538 1497 d = self._repo.dirstate
1539 1498 for f in d:
1540 1499 if d[f] != 'r':
1541 1500 yield f
1542 1501
1543 1502 def __contains__(self, key):
1544 1503 return self._repo.dirstate[key] not in "?r"
1545 1504
    def hex(self):
        # the working directory is identified by the wdirid pseudo-node
        return hex(wdirid)
1548 1507
    @propertycache
    def _parents(self):
        # the dirstate always reports two parents; drop the second one when
        # it is nullid so most working contexts see a single parent
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]
1555 1514
    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        :path: repo-relative path of the file
        :filelog: optional pre-resolved filelog, forwarded to workingfilectx
        """
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1560 1519
1561 1520 def dirty(self, missing=False, merge=True, branch=True):
1562 1521 "check whether a working directory is modified"
1563 1522 # check subrepos first
1564 1523 for s in sorted(self.substate):
1565 1524 if self.sub(s).dirty(missing=missing):
1566 1525 return True
1567 1526 # check current working dir
1568 1527 return ((merge and self.p2()) or
1569 1528 (branch and self.branch() != self.p1().branch()) or
1570 1529 self.modified() or self.added() or self.removed() or
1571 1530 (missing and self.deleted()))
1572 1531
    def add(self, list, prefix=""):
        """Schedule the given files for addition; returns the rejected ones.

        Nonexistent files and unsupported file types are warned about and
        rejected; very large files and already-tracked files only get a
        warning.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, modified or normal: nothing to do
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1607 1566
1608 1567 def forget(self, files, prefix=""):
1609 1568 with self._repo.wlock():
1610 1569 ds = self._repo.dirstate
1611 1570 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1612 1571 rejected = []
1613 1572 for f in files:
1614 1573 if f not in self._repo.dirstate:
1615 1574 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1616 1575 rejected.append(f)
1617 1576 elif self._repo.dirstate[f] != 'a':
1618 1577 self._repo.dirstate.remove(f)
1619 1578 else:
1620 1579 self._repo.dirstate.drop(f)
1621 1580 return rejected
1622 1581
    def undelete(self, list):
        """Restore removed files from a parent and mark them normal."""
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    # take the content from whichever parent has the file
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
1635 1594
    def copy(self, source, dest):
        """Record dest as a copy of source in the dirstate.

        Warns and bails out when dest does not exist, or is neither a
        regular file nor a symlink.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # only a missing dest is a soft failure; anything else is a bug
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                # make sure dest is tracked before recording the copy
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1656 1615
    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher for the working directory.

        On case-insensitive filesystems the matcher translates user input
        to the actual case stored in the filesystem.
        """
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)
1668 1627
1669 1628 def _filtersuspectsymlink(self, files):
1670 1629 if not files or self._repo.dirstate._checklink:
1671 1630 return files
1672 1631
1673 1632 # Symlink placeholders may get non-symlink-like contents
1674 1633 # via user error or dereferencing by NFS or Samba servers,
1675 1634 # so we filter out any placeholders that don't look like a
1676 1635 # symlink
1677 1636 sane = []
1678 1637 for f in files:
1679 1638 if self.flags(f) == 'l':
1680 1639 d = self[f].data()
1681 1640 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1682 1641 self._repo.ui.debug('ignoring suspect symlink placeholder'
1683 1642 ' "%s"\n' % f)
1684 1643 continue
1685 1644 sane.append(f)
1686 1645 return sane
1687 1646
    def _checklookup(self, files):
        """Recheck files whose dirstate entry is ambiguous ('lookup').

        Returns a (modified, deleted, fixup) triple: files whose content
        really differs from the parent, files that became inaccessible
        meanwhile, and files that are actually clean and only need their
        dirstate entry refreshed.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1717 1676
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        Also invokes any registered post-dirstate-status callbacks.  The
        dirstate is only written back when it was not changed concurrently
        (identity check) and the wlock could be taken without waiting.
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1757 1716
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.

        Ambiguous ('lookup') entries returned by the dirstate are resolved
        by content comparison, and files found to be actually clean get
        their dirstate entry fixed up as a side effect.
        '''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1788 1747
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1799 1758
1800 1759 def _buildstatusmanifest(self, status):
1801 1760 """Builds a manifest that includes the given status results."""
1802 1761 parents = self.parents()
1803 1762
1804 1763 man = parents[0].manifest().copy()
1805 1764
1806 1765 ff = self._flagfunc
1807 1766 for i, l in ((addednodeid, status.added),
1808 1767 (modifiednodeid, status.modified)):
1809 1768 for f in l:
1810 1769 man[f] = i
1811 1770 try:
1812 1771 man.setflag(f, ff(f))
1813 1772 except OSError:
1814 1773 pass
1815 1774
1816 1775 for f in status.deleted + status.removed:
1817 1776 if f in man:
1818 1777 del man[f]
1819 1778
1820 1779 return man
1821 1780
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # The incoming ``s`` is deliberately ignored: for the working
        # directory the dirstate is the authoritative source.
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # Not comparing against our parent: fall back to the generic
            # manifest-vs-manifest comparison in the base class.
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1841 1800
1842 1801 def _matchstatus(self, other, match):
1843 1802 """override the match method with a filter for directory patterns
1844 1803
1845 1804 We use inheritance to customize the match.bad method only in cases of
1846 1805 workingctx since it belongs only to the working directory when
1847 1806 comparing against the parent changeset.
1848 1807
1849 1808 If we aren't comparing against the working directory's parent, then we
1850 1809 just use the default match object sent to us.
1851 1810 """
1852 1811 if other != self._repo['.']:
1853 1812 def bad(f, msg):
1854 1813 # 'f' may be a directory pattern from 'match.files()',
1855 1814 # so 'f not in ctx1' is not enough
1856 1815 if f not in other and not other.hasdir(f):
1857 1816 self._repo.ui.warn('%s: %s\n' %
1858 1817 (self._repo.dirstate.pathto(f), msg))
1859 1818 match.bad = bad
1860 1819 return match
1861 1820
    def markcommitted(self, node):
        # Let the base class update its bookkeeping for the new commit,
        # then give the sparse extension a chance to react.
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1866 1825
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        # ``filelog`` and ``ctx`` are optional; when omitted, subclasses are
        # expected to provide ``_filelog``/``_changectx`` lazily.
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # A committable file context always represents an existing file.
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid signals "file absent in this parent"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # First parent is the copy source; filelog slot is None so
            # _parentfilectx resolves it from the source path.
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # Drop entries where the file does not exist (nullid node).
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # An uncommitted file has no committed descendants.
        return []
1913 1872
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # File content as read from the working directory.
        return self._repo.wread(self._path)
    def renamed(self):
        # Return (source path, source filenode in p1) if the dirstate
        # records this file as a copy, else None.
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        # Use the file's mtime (with the changeset's timezone); fall back
        # to the changeset date when the file is missing from disk.
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # Only meaningful when the file is tracked (normal/merged/added).
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            # A directory squats on the file's path; remove it entirely.
            wvfs.rmtree(f, forcibly=True)
        for p in reversed(list(util.finddirs(f))):
            # A file squats on one of the leading directories; removing the
            # innermost offender is sufficient.
            if wvfs.isfileorlink(p):
                wvfs.unlink(p)
                break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1992 1951
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        self.clean()

    def setbase(self, wrappedctx):
        """Set (or reset) the underlying context the cache overlays."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        """Return the (possibly cached) content of ``path``.

        Raises ProgrammingError if the file was deleted in the cache.
        """
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # p1's manifest patched with the cached adds/modifications/removals.
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # Dirty, still existing, and present in the underlying context.
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        # Dirty, existing, and absent from the underlying context.
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        # Dirty, deleted, but present in the underlying context.
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # Fixed: this context has no ``_path`` attribute (it is a
                # change context, not a file context); report the requested
                # path instead of raising a spurious AttributeError.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1():
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        if len(matches) > 0:
            if len(matches) == 1 and matches.keys()[0] == path:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(matches),
                                 ', '.join(matches.keys())))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=util.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=util.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # Fixed: use the ``path`` argument; ``self._path`` does not
                # exist on this class (see flags()).
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                            underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2280 2239
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        # ``parent`` is the owning overlayworkingctx; all reads and writes
        # below delegate to its per-path cache.
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True if contents differ from ``fctx``.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        # (source path, source filenode in p1), or None if not a copy.
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # No filesystem backing, so there is nothing to audit.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # In-memory context: unknown on-disk files cannot conflict.
        pass
2339 2298
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # Deliberately skip workingctx.__init__ (note the ``workingctx``
        # argument to super) and seed the committablectx base with the
        # precomputed ``changes`` status.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2375 2334
def makecachingfilectxfn(func):
    """Wrap ``func`` in a filectxfn that memoizes results per path.

    util.cachefunc is unsuitable here because it keys on every argument,
    and the repo/memctx arguments would create a reference cycle.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        try:
            return memo[path]
        except KeyError:
            fctx = memo[path] = func(repo, memctx, path)
            return fctx

    return getfilectx
2391 2350
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # Only the source path of a rename is kept; the source filenode
        # (second tuple element) is dropped.
        copysource = fctx.renamed()
        if copysource:
            copysource = copysource[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copysource)

    return getfilectx
2410 2369
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # A None payload means the patch deletes this file.
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data,
                          islink=islink, isexec=isexec, copied=copysource)

    return getfilectx
2425 2384
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # Normalize missing parents (None) to the null revision.
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no file parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a context: the file exists -> modified
                modified.append(f)
            else:
                # filectxfn returned None: the file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2548 2507
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Flags string: 'l' for symlink, 'x' for executable.
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            # repo._filecommit recalculates the copy revision, so nullid
            # is an acceptable placeholder here.
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2581 2540
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        # Lazily evaluated; either the override or originalfctx.data.
        return self._datafunc()
2652 2611
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        # basectx.__new__ only wants the repo; swallow the extra args here.
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            # Default to the original commit message.
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # Pad with null contexts so _parents always has exactly two entries.
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): p1/p2 are changectx objects compared against the
        # nullid bytes; the guard likely intends p1.node() != nullid — the
        # manifestnode comparison below still does the real check. Confirm.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            # Let the caller-supplied editor rewrite the commit message.
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # Node of the manifest being reused from the original commit.
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # File contents are unchanged, so delegate to the original context.
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        # Classify each touched file against the parent manifests.
        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2750 2709
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s.

        Note the inverted convention versus filecmp.cmp(), which returns
        True when the files are the *same*.
        """
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # An arbitrary on-disk file carries no exec/symlink flags.
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        # Open in binary mode: ``data`` is bytes, matching data() and
        # decodeddata() which read bytes. Text mode ("w") rejects bytes on
        # Python 3 and mangles line endings on Windows.
        with open(self._path, "wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now