annotate: make pair take all parents to pair against...
Siddharth Agarwal
r32484:c50f29b3 default
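In brief: _annotatepair previously paired the child's annotation data against a single parent and was called once per parent inside the history-walking loop; with this change it receives all parents at once and does the pairing itself, so unchanged lines are attributed to whichever parent matches, with later parents (p2 over p1) taking precedence. The standalone sketch below only illustrates that idea; it uses Python's difflib instead of Mercurial's mdiff, the annotate_pair helper name is hypothetical, and it is not the code from this changeset.

import difflib

def annotate_pair(parents, child):
    # Toy stand-in for _annotatepair: each argument is a pair
    # (labels, lines) where labels[i] says which revision last
    # touched lines[i].  Lines that match a parent inherit that
    # parent's label; later parents overwrite earlier ones,
    # mirroring the p2-over-p1 preference noted in the real code.
    clabels, clines = child
    for plabels, plines in parents:
        sm = difflib.SequenceMatcher(None, plines, clines)
        for a, b, size in sm.get_matching_blocks():
            clabels[b:b + size] = plabels[a:a + size]
    return clabels, clines

p1 = (['p1', 'p1'], ['x', 'y'])
p2 = (['p2', 'p2'], ['y', 'z'])
child = (['child'] * 3, ['x', 'y', 'z'])
print(annotate_pair([p1, p2], child)[0])  # -> ['p1', 'p2', 'p2']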
@@ -1,2251 +1,2255 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 )
27 27 from . import (
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 mdiff,
33 33 obsolete as obsmod,
34 34 patch,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 revlog,
39 39 scmutil,
40 40 subrepo,
41 41 util,
42 42 )
43 43
44 44 propertycache = util.propertycache
45 45
46 46 nonascii = re.compile(r'[^\x21-\x7f]').search
47 47
48 48 class basectx(object):
49 49 """A basectx object represents the common logic for its children:
50 50 changectx: read-only context that is already present in the repo,
51 51 workingctx: a context that represents the working directory and can
52 52 be committed,
53 53 memctx: a context that represents changes in-memory and can also
54 54 be committed."""
55 55 def __new__(cls, repo, changeid='', *args, **kwargs):
56 56 if isinstance(changeid, basectx):
57 57 return changeid
58 58
59 59 o = super(basectx, cls).__new__(cls)
60 60
61 61 o._repo = repo
62 62 o._rev = nullrev
63 63 o._node = nullid
64 64
65 65 return o
66 66
67 67 def __str__(self):
68 68 r = short(self.node())
69 69 if pycompat.ispy3:
70 70 return r.decode('ascii')
71 71 return r
72 72
73 73 def __bytes__(self):
74 74 return short(self.node())
75 75
76 76 def __int__(self):
77 77 return self.rev()
78 78
79 79 def __repr__(self):
80 80 return "<%s %s>" % (type(self).__name__, str(self))
81 81
82 82 def __eq__(self, other):
83 83 try:
84 84 return type(self) == type(other) and self._rev == other._rev
85 85 except AttributeError:
86 86 return False
87 87
88 88 def __ne__(self, other):
89 89 return not (self == other)
90 90
91 91 def __contains__(self, key):
92 92 return key in self._manifest
93 93
94 94 def __getitem__(self, key):
95 95 return self.filectx(key)
96 96
97 97 def __iter__(self):
98 98 return iter(self._manifest)
99 99
100 100 def _buildstatusmanifest(self, status):
101 101 """Builds a manifest that includes the given status results, if this is
102 102 a working copy context. For non-working copy contexts, it just returns
103 103 the normal manifest."""
104 104 return self.manifest()
105 105
106 106 def _matchstatus(self, other, match):
107 107 """return match.always if match is none
108 108
109 109 This internal method provides a way for child objects to override the
110 110 match operator.
111 111 """
112 112 return match or matchmod.always(self._repo.root, self._repo.getcwd())
113 113
114 114 def _buildstatus(self, other, s, match, listignored, listclean,
115 115 listunknown):
116 116 """build a status with respect to another context"""
117 117 # Load earliest manifest first for caching reasons. More specifically,
118 118 # if you have revisions 1000 and 1001, 1001 is probably stored as a
119 119 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
120 120 # 1000 and cache it so that when you read 1001, we just need to apply a
121 121 # delta to what's in the cache. So that's one full reconstruction + one
122 122 # delta application.
123 123 mf2 = None
124 124 if self.rev() is not None and self.rev() < other.rev():
125 125 mf2 = self._buildstatusmanifest(s)
126 126 mf1 = other._buildstatusmanifest(s)
127 127 if mf2 is None:
128 128 mf2 = self._buildstatusmanifest(s)
129 129
130 130 modified, added = [], []
131 131 removed = []
132 132 clean = []
133 133 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
134 134 deletedset = set(deleted)
135 135 d = mf1.diff(mf2, match=match, clean=listclean)
136 136 for fn, value in d.iteritems():
137 137 if fn in deletedset:
138 138 continue
139 139 if value is None:
140 140 clean.append(fn)
141 141 continue
142 142 (node1, flag1), (node2, flag2) = value
143 143 if node1 is None:
144 144 added.append(fn)
145 145 elif node2 is None:
146 146 removed.append(fn)
147 147 elif flag1 != flag2:
148 148 modified.append(fn)
149 149 elif node2 not in wdirnodes:
150 150 # When comparing files between two commits, we save time by
151 151 # not comparing the file contents when the nodeids differ.
152 152 # Note that this means we incorrectly report a reverted change
153 153 # to a file as a modification.
154 154 modified.append(fn)
155 155 elif self[fn].cmp(other[fn]):
156 156 modified.append(fn)
157 157 else:
158 158 clean.append(fn)
159 159
160 160 if removed:
161 161 # need to filter files if they are already reported as removed
162 162 unknown = [fn for fn in unknown if fn not in mf1 and
163 163 (not match or match(fn))]
164 164 ignored = [fn for fn in ignored if fn not in mf1 and
165 165 (not match or match(fn))]
166 166 # if they're deleted, don't report them as removed
167 167 removed = [fn for fn in removed if fn not in deletedset]
168 168
169 169 return scmutil.status(modified, added, removed, deleted, unknown,
170 170 ignored, clean)
171 171
172 172 @propertycache
173 173 def substate(self):
174 174 return subrepo.state(self, self._repo.ui)
175 175
176 176 def subrev(self, subpath):
177 177 return self.substate[subpath][1]
178 178
179 179 def rev(self):
180 180 return self._rev
181 181 def node(self):
182 182 return self._node
183 183 def hex(self):
184 184 return hex(self.node())
185 185 def manifest(self):
186 186 return self._manifest
187 187 def manifestctx(self):
188 188 return self._manifestctx
189 189 def repo(self):
190 190 return self._repo
191 191 def phasestr(self):
192 192 return phases.phasenames[self.phase()]
193 193 def mutable(self):
194 194 return self.phase() > phases.public
195 195
196 196 def getfileset(self, expr):
197 197 return fileset.getfileset(self, expr)
198 198
199 199 def obsolete(self):
200 200 """True if the changeset is obsolete"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
202 202
203 203 def extinct(self):
204 204 """True if the changeset is extinct"""
205 205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
206 206
207 207 def unstable(self):
208 208 """True if the changeset is not obsolete but it's ancestor are"""
209 209 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
210 210
211 211 def bumped(self):
212 212 """True if the changeset try to be a successor of a public changeset
213 213
214 214 Only non-public and non-obsolete changesets may be bumped.
215 215 """
216 216 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
217 217
218 218 def divergent(self):
219 219 """Is a successors of a changeset with multiple possible successors set
220 220
221 221 Only non-public and non-obsolete changesets may be divergent.
222 222 """
223 223 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
224 224
225 225 def troubled(self):
226 226 """True if the changeset is either unstable, bumped or divergent"""
227 227 return self.unstable() or self.bumped() or self.divergent()
228 228
229 229 def troubles(self):
230 230 """return the list of troubles affecting this changesets.
231 231
232 232 Troubles are returned as strings. possible values are:
233 233 - unstable,
234 234 - bumped,
235 235 - divergent.
236 236 """
237 237 troubles = []
238 238 if self.unstable():
239 239 troubles.append('unstable')
240 240 if self.bumped():
241 241 troubles.append('bumped')
242 242 if self.divergent():
243 243 troubles.append('divergent')
244 244 return troubles
245 245
246 246 def parents(self):
247 247 """return contexts for each parent changeset"""
248 248 return self._parents
249 249
250 250 def p1(self):
251 251 return self._parents[0]
252 252
253 253 def p2(self):
254 254 parents = self._parents
255 255 if len(parents) == 2:
256 256 return parents[1]
257 257 return changectx(self._repo, nullrev)
258 258
259 259 def _fileinfo(self, path):
260 260 if r'_manifest' in self.__dict__:
261 261 try:
262 262 return self._manifest[path], self._manifest.flags(path)
263 263 except KeyError:
264 264 raise error.ManifestLookupError(self._node, path,
265 265 _('not found in manifest'))
266 266 if r'_manifestdelta' in self.__dict__ or path in self.files():
267 267 if path in self._manifestdelta:
268 268 return (self._manifestdelta[path],
269 269 self._manifestdelta.flags(path))
270 270 mfl = self._repo.manifestlog
271 271 try:
272 272 node, flag = mfl[self._changeset.manifest].find(path)
273 273 except KeyError:
274 274 raise error.ManifestLookupError(self._node, path,
275 275 _('not found in manifest'))
276 276
277 277 return node, flag
278 278
279 279 def filenode(self, path):
280 280 return self._fileinfo(path)[0]
281 281
282 282 def flags(self, path):
283 283 try:
284 284 return self._fileinfo(path)[1]
285 285 except error.LookupError:
286 286 return ''
287 287
288 288 def sub(self, path, allowcreate=True):
289 289 '''return a subrepo for the stored revision of path, never wdir()'''
290 290 return subrepo.subrepo(self, path, allowcreate=allowcreate)
291 291
292 292 def nullsub(self, path, pctx):
293 293 return subrepo.nullsubrepo(self, path, pctx)
294 294
295 295 def workingsub(self, path):
296 296 '''return a subrepo for the stored revision, or wdir if this is a wdir
297 297 context.
298 298 '''
299 299 return subrepo.subrepo(self, path, allowwdir=True)
300 300
301 301 def match(self, pats=None, include=None, exclude=None, default='glob',
302 302 listsubrepos=False, badfn=None):
303 303 if pats is None:
304 304 pats = []
305 305 r = self._repo
306 306 return matchmod.match(r.root, r.getcwd(), pats,
307 307 include, exclude, default,
308 308 auditor=r.nofsauditor, ctx=self,
309 309 listsubrepos=listsubrepos, badfn=badfn)
310 310
311 311 def diff(self, ctx2=None, match=None, **opts):
312 312 """Returns a diff generator for the given contexts and matcher"""
313 313 if ctx2 is None:
314 314 ctx2 = self.p1()
315 315 if ctx2 is not None:
316 316 ctx2 = self._repo[ctx2]
317 317 diffopts = patch.diffopts(self._repo.ui, opts)
318 318 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
319 319
320 320 def dirs(self):
321 321 return self._manifest.dirs()
322 322
323 323 def hasdir(self, dir):
324 324 return self._manifest.hasdir(dir)
325 325
326 326 def dirty(self, missing=False, merge=True, branch=True):
327 327 return False
328 328
329 329 def status(self, other=None, match=None, listignored=False,
330 330 listclean=False, listunknown=False, listsubrepos=False):
331 331 """return status of files between two nodes or node and working
332 332 directory.
333 333
334 334 If other is None, compare this node with working directory.
335 335
336 336 returns (modified, added, removed, deleted, unknown, ignored, clean)
337 337 """
338 338
339 339 ctx1 = self
340 340 ctx2 = self._repo[other]
341 341
342 342 # This next code block is, admittedly, fragile logic that tests for
343 343 # reversing the contexts and wouldn't need to exist if it weren't for
344 344 # the fast (and common) code path of comparing the working directory
345 345 # with its first parent.
346 346 #
347 347 # What we're aiming for here is the ability to call:
348 348 #
349 349 # workingctx.status(parentctx)
350 350 #
351 351 # If we always built the manifest for each context and compared those,
352 352 # then we'd be done. But the special case of the above call means we
353 353 # just copy the manifest of the parent.
354 354 reversed = False
355 355 if (not isinstance(ctx1, changectx)
356 356 and isinstance(ctx2, changectx)):
357 357 reversed = True
358 358 ctx1, ctx2 = ctx2, ctx1
359 359
360 360 match = ctx2._matchstatus(ctx1, match)
361 361 r = scmutil.status([], [], [], [], [], [], [])
362 362 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
363 363 listunknown)
364 364
365 365 if reversed:
366 366 # Reverse added and removed. Clear deleted, unknown and ignored as
367 367 # these make no sense to reverse.
368 368 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
369 369 r.clean)
370 370
371 371 if listsubrepos:
372 372 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
373 373 try:
374 374 rev2 = ctx2.subrev(subpath)
375 375 except KeyError:
376 376 # A subrepo that existed in node1 was deleted between
377 377 # node1 and node2 (inclusive). Thus, ctx2's substate
378 378 # won't contain that subpath. The best we can do is ignore it.
379 379 rev2 = None
380 380 submatch = matchmod.subdirmatcher(subpath, match)
381 381 s = sub.status(rev2, match=submatch, ignored=listignored,
382 382 clean=listclean, unknown=listunknown,
383 383 listsubrepos=True)
384 384 for rfiles, sfiles in zip(r, s):
385 385 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
386 386
387 387 for l in r:
388 388 l.sort()
389 389
390 390 return r
391 391
392 392
393 393 def makememctx(repo, parents, text, user, date, branch, files, store,
394 394 editor=None, extra=None):
395 395 def getfilectx(repo, memctx, path):
396 396 data, mode, copied = store.getfile(path)
397 397 if data is None:
398 398 return None
399 399 islink, isexec = mode
400 400 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
401 401 copied=copied, memctx=memctx)
402 402 if extra is None:
403 403 extra = {}
404 404 if branch:
405 405 extra['branch'] = encoding.fromlocal(branch)
406 406 ctx = memctx(repo, parents, text, files, getfilectx, user,
407 407 date, extra, editor)
408 408 return ctx
409 409
410 410 def _filterederror(repo, changeid):
411 411 """build an exception to be raised about a filtered changeid
412 412
413 413 This is extracted in a function to help extensions (eg: evolve) to
414 414 experiment with various message variants."""
415 415 if repo.filtername.startswith('visible'):
416 416 msg = _("hidden revision '%s'") % changeid
417 417 hint = _('use --hidden to access hidden revisions')
418 418 return error.FilteredRepoLookupError(msg, hint=hint)
419 419 msg = _("filtered revision '%s' (not in '%s' subset)")
420 420 msg %= (changeid, repo.filtername)
421 421 return error.FilteredRepoLookupError(msg)
422 422
423 423 class changectx(basectx):
424 424 """A changecontext object makes access to data related to a particular
425 425 changeset convenient. It represents a read-only context already present in
426 426 the repo."""
427 427 def __init__(self, repo, changeid=''):
428 428 """changeid is a revision number, node, or tag"""
429 429
430 430 # since basectx.__new__ already took care of copying the object, we
431 431 # don't need to do anything in __init__, so we just exit here
432 432 if isinstance(changeid, basectx):
433 433 return
434 434
435 435 if changeid == '':
436 436 changeid = '.'
437 437 self._repo = repo
438 438
439 439 try:
440 440 if isinstance(changeid, int):
441 441 self._node = repo.changelog.node(changeid)
442 442 self._rev = changeid
443 443 return
444 444 if not pycompat.ispy3 and isinstance(changeid, long):
445 445 changeid = str(changeid)
446 446 if changeid == 'null':
447 447 self._node = nullid
448 448 self._rev = nullrev
449 449 return
450 450 if changeid == 'tip':
451 451 self._node = repo.changelog.tip()
452 452 self._rev = repo.changelog.rev(self._node)
453 453 return
454 454 if changeid == '.' or changeid == repo.dirstate.p1():
455 455 # this is a hack to delay/avoid loading obsmarkers
456 456 # when we know that '.' won't be hidden
457 457 self._node = repo.dirstate.p1()
458 458 self._rev = repo.unfiltered().changelog.rev(self._node)
459 459 return
460 460 if len(changeid) == 20:
461 461 try:
462 462 self._node = changeid
463 463 self._rev = repo.changelog.rev(changeid)
464 464 return
465 465 except error.FilteredRepoLookupError:
466 466 raise
467 467 except LookupError:
468 468 pass
469 469
470 470 try:
471 471 r = int(changeid)
472 472 if '%d' % r != changeid:
473 473 raise ValueError
474 474 l = len(repo.changelog)
475 475 if r < 0:
476 476 r += l
477 477 if r < 0 or r >= l:
478 478 raise ValueError
479 479 self._rev = r
480 480 self._node = repo.changelog.node(r)
481 481 return
482 482 except error.FilteredIndexError:
483 483 raise
484 484 except (ValueError, OverflowError, IndexError):
485 485 pass
486 486
487 487 if len(changeid) == 40:
488 488 try:
489 489 self._node = bin(changeid)
490 490 self._rev = repo.changelog.rev(self._node)
491 491 return
492 492 except error.FilteredLookupError:
493 493 raise
494 494 except (TypeError, LookupError):
495 495 pass
496 496
497 497 # lookup bookmarks through the name interface
498 498 try:
499 499 self._node = repo.names.singlenode(repo, changeid)
500 500 self._rev = repo.changelog.rev(self._node)
501 501 return
502 502 except KeyError:
503 503 pass
504 504 except error.FilteredRepoLookupError:
505 505 raise
506 506 except error.RepoLookupError:
507 507 pass
508 508
509 509 self._node = repo.unfiltered().changelog._partialmatch(changeid)
510 510 if self._node is not None:
511 511 self._rev = repo.changelog.rev(self._node)
512 512 return
513 513
514 514 # lookup failed
515 515 # check if it might have come from damaged dirstate
516 516 #
517 517 # XXX we could avoid the unfiltered if we had a recognizable
518 518 # exception for filtered changeset access
519 519 if changeid in repo.unfiltered().dirstate.parents():
520 520 msg = _("working directory has unknown parent '%s'!")
521 521 raise error.Abort(msg % short(changeid))
522 522 try:
523 523 if len(changeid) == 20 and nonascii(changeid):
524 524 changeid = hex(changeid)
525 525 except TypeError:
526 526 pass
527 527 except (error.FilteredIndexError, error.FilteredLookupError,
528 528 error.FilteredRepoLookupError):
529 529 raise _filterederror(repo, changeid)
530 530 except IndexError:
531 531 pass
532 532 raise error.RepoLookupError(
533 533 _("unknown revision '%s'") % changeid)
534 534
535 535 def __hash__(self):
536 536 try:
537 537 return hash(self._rev)
538 538 except AttributeError:
539 539 return id(self)
540 540
541 541 def __nonzero__(self):
542 542 return self._rev != nullrev
543 543
544 544 __bool__ = __nonzero__
545 545
546 546 @propertycache
547 547 def _changeset(self):
548 548 return self._repo.changelog.changelogrevision(self.rev())
549 549
550 550 @propertycache
551 551 def _manifest(self):
552 552 return self._manifestctx.read()
553 553
554 554 @propertycache
555 555 def _manifestctx(self):
556 556 return self._repo.manifestlog[self._changeset.manifest]
557 557
558 558 @propertycache
559 559 def _manifestdelta(self):
560 560 return self._manifestctx.readdelta()
561 561
562 562 @propertycache
563 563 def _parents(self):
564 564 repo = self._repo
565 565 p1, p2 = repo.changelog.parentrevs(self._rev)
566 566 if p2 == nullrev:
567 567 return [changectx(repo, p1)]
568 568 return [changectx(repo, p1), changectx(repo, p2)]
569 569
570 570 def changeset(self):
571 571 c = self._changeset
572 572 return (
573 573 c.manifest,
574 574 c.user,
575 575 c.date,
576 576 c.files,
577 577 c.description,
578 578 c.extra,
579 579 )
580 580 def manifestnode(self):
581 581 return self._changeset.manifest
582 582
583 583 def user(self):
584 584 return self._changeset.user
585 585 def date(self):
586 586 return self._changeset.date
587 587 def files(self):
588 588 return self._changeset.files
589 589 def description(self):
590 590 return self._changeset.description
591 591 def branch(self):
592 592 return encoding.tolocal(self._changeset.extra.get("branch"))
593 593 def closesbranch(self):
594 594 return 'close' in self._changeset.extra
595 595 def extra(self):
596 596 return self._changeset.extra
597 597 def tags(self):
598 598 return self._repo.nodetags(self._node)
599 599 def bookmarks(self):
600 600 return self._repo.nodebookmarks(self._node)
601 601 def phase(self):
602 602 return self._repo._phasecache.phase(self._repo, self._rev)
603 603 def hidden(self):
604 604 return self._rev in repoview.filterrevs(self._repo, 'visible')
605 605
606 606 def children(self):
607 607 """return contexts for each child changeset"""
608 608 c = self._repo.changelog.children(self._node)
609 609 return [changectx(self._repo, x) for x in c]
610 610
611 611 def ancestors(self):
612 612 for a in self._repo.changelog.ancestors([self._rev]):
613 613 yield changectx(self._repo, a)
614 614
615 615 def descendants(self):
616 616 for d in self._repo.changelog.descendants([self._rev]):
617 617 yield changectx(self._repo, d)
618 618
619 619 def filectx(self, path, fileid=None, filelog=None):
620 620 """get a file context from this changeset"""
621 621 if fileid is None:
622 622 fileid = self.filenode(path)
623 623 return filectx(self._repo, path, fileid=fileid,
624 624 changectx=self, filelog=filelog)
625 625
626 626 def ancestor(self, c2, warn=False):
627 627 """return the "best" ancestor context of self and c2
628 628
629 629 If there are multiple candidates, it will show a message and check
630 630 merge.preferancestor configuration before falling back to the
631 631 revlog ancestor."""
632 632 # deal with workingctxs
633 633 n2 = c2._node
634 634 if n2 is None:
635 635 n2 = c2._parents[0]._node
636 636 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
637 637 if not cahs:
638 638 anc = nullid
639 639 elif len(cahs) == 1:
640 640 anc = cahs[0]
641 641 else:
642 642 # experimental config: merge.preferancestor
643 643 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
644 644 try:
645 645 ctx = changectx(self._repo, r)
646 646 except error.RepoLookupError:
647 647 continue
648 648 anc = ctx.node()
649 649 if anc in cahs:
650 650 break
651 651 else:
652 652 anc = self._repo.changelog.ancestor(self._node, n2)
653 653 if warn:
654 654 self._repo.ui.status(
655 655 (_("note: using %s as ancestor of %s and %s\n") %
656 656 (short(anc), short(self._node), short(n2))) +
657 657 ''.join(_(" alternatively, use --config "
658 658 "merge.preferancestor=%s\n") %
659 659 short(n) for n in sorted(cahs) if n != anc))
660 660 return changectx(self._repo, anc)
661 661
662 662 def descendant(self, other):
663 663 """True if other is descendant of this changeset"""
664 664 return self._repo.changelog.descendant(self._rev, other._rev)
665 665
666 666 def walk(self, match):
667 667 '''Generates matching file names.'''
668 668
669 669 # Wrap match.bad method to have message with nodeid
670 670 def bad(fn, msg):
671 671 # The manifest doesn't know about subrepos, so don't complain about
672 672 # paths into valid subrepos.
673 673 if any(fn == s or fn.startswith(s + '/')
674 674 for s in self.substate):
675 675 return
676 676 match.bad(fn, _('no such file in rev %s') % self)
677 677
678 678 m = matchmod.badmatch(match, bad)
679 679 return self._manifest.walk(m)
680 680
681 681 def matches(self, match):
682 682 return self.walk(match)
683 683
684 684 class basefilectx(object):
685 685 """A filecontext object represents the common logic for its children:
686 686 filectx: read-only access to a filerevision that is already present
687 687 in the repo,
688 688 workingfilectx: a filecontext that represents files from the working
689 689 directory,
690 690 memfilectx: a filecontext that represents files in-memory,
691 691 overlayfilectx: duplicate another filecontext with some fields overridden.
692 692 """
693 693 @propertycache
694 694 def _filelog(self):
695 695 return self._repo.file(self._path)
696 696
697 697 @propertycache
698 698 def _changeid(self):
699 699 if r'_changeid' in self.__dict__:
700 700 return self._changeid
701 701 elif r'_changectx' in self.__dict__:
702 702 return self._changectx.rev()
703 703 elif r'_descendantrev' in self.__dict__:
704 704 # this file context was created from a revision with a known
705 705 # descendant, we can (lazily) correct for linkrev aliases
706 706 return self._adjustlinkrev(self._descendantrev)
707 707 else:
708 708 return self._filelog.linkrev(self._filerev)
709 709
710 710 @propertycache
711 711 def _filenode(self):
712 712 if r'_fileid' in self.__dict__:
713 713 return self._filelog.lookup(self._fileid)
714 714 else:
715 715 return self._changectx.filenode(self._path)
716 716
717 717 @propertycache
718 718 def _filerev(self):
719 719 return self._filelog.rev(self._filenode)
720 720
721 721 @propertycache
722 722 def _repopath(self):
723 723 return self._path
724 724
725 725 def __nonzero__(self):
726 726 try:
727 727 self._filenode
728 728 return True
729 729 except error.LookupError:
730 730 # file is missing
731 731 return False
732 732
733 733 __bool__ = __nonzero__
734 734
735 735 def __str__(self):
736 736 try:
737 737 return "%s@%s" % (self.path(), self._changectx)
738 738 except error.LookupError:
739 739 return "%s@???" % self.path()
740 740
741 741 def __repr__(self):
742 742 return "<%s %s>" % (type(self).__name__, str(self))
743 743
744 744 def __hash__(self):
745 745 try:
746 746 return hash((self._path, self._filenode))
747 747 except AttributeError:
748 748 return id(self)
749 749
750 750 def __eq__(self, other):
751 751 try:
752 752 return (type(self) == type(other) and self._path == other._path
753 753 and self._filenode == other._filenode)
754 754 except AttributeError:
755 755 return False
756 756
757 757 def __ne__(self, other):
758 758 return not (self == other)
759 759
760 760 def filerev(self):
761 761 return self._filerev
762 762 def filenode(self):
763 763 return self._filenode
764 764 @propertycache
765 765 def _flags(self):
766 766 return self._changectx.flags(self._path)
767 767 def flags(self):
768 768 return self._flags
769 769 def filelog(self):
770 770 return self._filelog
771 771 def rev(self):
772 772 return self._changeid
773 773 def linkrev(self):
774 774 return self._filelog.linkrev(self._filerev)
775 775 def node(self):
776 776 return self._changectx.node()
777 777 def hex(self):
778 778 return self._changectx.hex()
779 779 def user(self):
780 780 return self._changectx.user()
781 781 def date(self):
782 782 return self._changectx.date()
783 783 def files(self):
784 784 return self._changectx.files()
785 785 def description(self):
786 786 return self._changectx.description()
787 787 def branch(self):
788 788 return self._changectx.branch()
789 789 def extra(self):
790 790 return self._changectx.extra()
791 791 def phase(self):
792 792 return self._changectx.phase()
793 793 def phasestr(self):
794 794 return self._changectx.phasestr()
795 795 def manifest(self):
796 796 return self._changectx.manifest()
797 797 def changectx(self):
798 798 return self._changectx
799 799 def renamed(self):
800 800 return self._copied
801 801 def repo(self):
802 802 return self._repo
803 803 def size(self):
804 804 return len(self.data())
805 805
806 806 def path(self):
807 807 return self._path
808 808
809 809 def isbinary(self):
810 810 try:
811 811 return util.binary(self.data())
812 812 except IOError:
813 813 return False
814 814 def isexec(self):
815 815 return 'x' in self.flags()
816 816 def islink(self):
817 817 return 'l' in self.flags()
818 818
819 819 def isabsent(self):
820 820 """whether this filectx represents a file not in self._changectx
821 821
822 822 This is mainly for merge code to detect change/delete conflicts. This is
823 823 expected to be True for all subclasses of basectx."""
824 824 return False
825 825
826 826 _customcmp = False
827 827 def cmp(self, fctx):
828 828 """compare with other file context
829 829
830 830 returns True if different than fctx.
831 831 """
832 832 if fctx._customcmp:
833 833 return fctx.cmp(self)
834 834
835 835 if (fctx._filenode is None
836 836 and (self._repo._encodefilterpats
837 837 # if file data starts with '\1\n', empty metadata block is
838 838 # prepended, which adds 4 bytes to filelog.size().
839 839 or self.size() - 4 == fctx.size())
840 840 or self.size() == fctx.size()):
841 841 return self._filelog.cmp(self._filenode, fctx.data())
842 842
843 843 return True
844 844
845 845 def _adjustlinkrev(self, srcrev, inclusive=False):
846 846 """return the first ancestor of <srcrev> introducing <fnode>
847 847
848 848 If the linkrev of the file revision does not point to an ancestor of
849 849 srcrev, we'll walk down the ancestors until we find one introducing
850 850 this file revision.
851 851
852 852 :srcrev: the changeset revision we search ancestors from
853 853 :inclusive: if true, the src revision will also be checked
854 854 """
855 855 repo = self._repo
856 856 cl = repo.unfiltered().changelog
857 857 mfl = repo.manifestlog
858 858 # fetch the linkrev
859 859 lkr = self.linkrev()
860 860 # hack to reuse ancestor computation when searching for renames
861 861 memberanc = getattr(self, '_ancestrycontext', None)
862 862 iteranc = None
863 863 if srcrev is None:
864 864 # wctx case, used by workingfilectx during mergecopy
865 865 revs = [p.rev() for p in self._repo[None].parents()]
866 866 inclusive = True # we skipped the real (revless) source
867 867 else:
868 868 revs = [srcrev]
869 869 if memberanc is None:
870 870 memberanc = iteranc = cl.ancestors(revs, lkr,
871 871 inclusive=inclusive)
872 872 # check if this linkrev is an ancestor of srcrev
873 873 if lkr not in memberanc:
874 874 if iteranc is None:
875 875 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
876 876 fnode = self._filenode
877 877 path = self._path
878 878 for a in iteranc:
879 879 ac = cl.read(a) # get changeset data (we avoid object creation)
880 880 if path in ac[3]: # checking the 'files' field.
881 881 # The file has been touched, check if the content is
882 882 # similar to the one we search for.
883 883 if fnode == mfl[ac[0]].readfast().get(path):
884 884 return a
885 885 # In theory, we should never get out of that loop without a result.
886 886 # But if the manifest uses a buggy file revision (not a child of the
887 887 # one it replaces) we could. Such a buggy situation will likely
888 888 # result in a crash somewhere else at some point.
889 889 return lkr
890 890
891 891 def introrev(self):
892 892 """return the rev of the changeset which introduced this file revision
893 893
894 894 This method is different from linkrev because it takes into account the
895 895 changeset the filectx was created from. It ensures the returned
896 896 revision is one of its ancestors. This prevents bugs from
897 897 'linkrev-shadowing' when a file revision is used by multiple
898 898 changesets.
899 899 """
900 900 lkr = self.linkrev()
901 901 attrs = vars(self)
902 902 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
903 903 if noctx or self.rev() == lkr:
904 904 return self.linkrev()
905 905 return self._adjustlinkrev(self.rev(), inclusive=True)
906 906
907 907 def _parentfilectx(self, path, fileid, filelog):
908 908 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
909 909 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
910 910 if '_changeid' in vars(self) or '_changectx' in vars(self):
911 911 # If self is associated with a changeset (probably explicitly
912 912 # fed), ensure the created filectx is associated with a
913 913 # changeset that is an ancestor of self.changectx.
914 914 # This lets us later use _adjustlinkrev to get a correct link.
915 915 fctx._descendantrev = self.rev()
916 916 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
917 917 elif '_descendantrev' in vars(self):
918 918 # Otherwise propagate _descendantrev if we have one associated.
919 919 fctx._descendantrev = self._descendantrev
920 920 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
921 921 return fctx
922 922
923 923 def parents(self):
924 924 _path = self._path
925 925 fl = self._filelog
926 926 parents = self._filelog.parents(self._filenode)
927 927 pl = [(_path, node, fl) for node in parents if node != nullid]
928 928
929 929 r = fl.renamed(self._filenode)
930 930 if r:
931 931 # - In the simple rename case, both parents are nullid, pl is empty.
932 932 # - In case of merge, only one of the parents is nullid and should
933 933 # be replaced with the rename information. This parent is -always-
934 934 # the first one.
935 935 #
936 936 # As nullid parents have always been filtered out in the previous list
937 937 # comprehension, inserting at 0 will always result in replacing the
938 938 # first nullid parent with rename information.
939 939 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
940 940
941 941 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
942 942
943 943 def p1(self):
944 944 return self.parents()[0]
945 945
946 946 def p2(self):
947 947 p = self.parents()
948 948 if len(p) == 2:
949 949 return p[1]
950 950 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
951 951
952 952 def annotate(self, follow=False, linenumber=False, diffopts=None):
953 953 '''returns a list of tuples of ((ctx, number), line) for each line
954 954 in the file, where ctx is the filectx of the node where
955 955 that line was last changed; if linenumber parameter is true, number is
956 956 the line number at the first appearance in the managed file, otherwise,
957 957 number has a fixed value of False.
958 958 '''
959 959
960 960 def lines(text):
961 961 if text.endswith("\n"):
962 962 return text.count("\n")
963 963 return text.count("\n") + int(bool(text))
964 964
965 965 if linenumber:
966 966 def decorate(text, rev):
967 967 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
968 968 else:
969 969 def decorate(text, rev):
970 970 return ([(rev, False)] * lines(text), text)
971 971
972 972 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
973 973
974 974 def parents(f):
975 975 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
976 976 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
977 977 # from the topmost introrev (= srcrev) down to p.linkrev() if it
978 978 # isn't an ancestor of the srcrev.
979 979 f._changeid
980 980 pl = f.parents()
981 981
982 982 # Don't return renamed parents if we aren't following.
983 983 if not follow:
984 984 pl = [p for p in pl if p.path() == f.path()]
985 985
986 986 # renamed filectx won't have a filelog yet, so set it
987 987 # from the cache to save time
988 988 for p in pl:
989 989 if not '_filelog' in p.__dict__:
990 990 p._filelog = getlog(p.path())
991 991
992 992 return pl
993 993
994 994 # use linkrev to find the first changeset where self appeared
995 995 base = self
996 996 introrev = self.introrev()
997 997 if self.rev() != introrev:
998 998 base = self.filectx(self.filenode(), changeid=introrev)
999 999 if getattr(base, '_ancestrycontext', None) is None:
1000 1000 cl = self._repo.changelog
1001 1001 if introrev is None:
1002 1002 # wctx is not inclusive, but works because _ancestrycontext
1003 1003 # is used to test filelog revisions
1004 1004 ac = cl.ancestors([p.rev() for p in base.parents()],
1005 1005 inclusive=True)
1006 1006 else:
1007 1007 ac = cl.ancestors([introrev], inclusive=True)
1008 1008 base._ancestrycontext = ac
1009 1009
1010 1010 # This algorithm would prefer to be recursive, but Python is a
1011 1011 # bit recursion-hostile. Instead we do an iterative
1012 1012 # depth-first search.
1013 1013
1014 1014 # 1st DFS pre-calculates pcache and needed
1015 1015 visit = [base]
1016 1016 pcache = {}
1017 1017 needed = {base: 1}
1018 1018 while visit:
1019 1019 f = visit.pop()
1020 1020 if f in pcache:
1021 1021 continue
1022 1022 pl = parents(f)
1023 1023 pcache[f] = pl
1024 1024 for p in pl:
1025 1025 needed[p] = needed.get(p, 0) + 1
1026 1026 if p not in pcache:
1027 1027 visit.append(p)
1028 1028
1029 1029 # 2nd DFS does the actual annotate
1030 1030 visit[:] = [base]
1031 1031 hist = {}
1032 1032 while visit:
1033 1033 f = visit[-1]
1034 1034 if f in hist:
1035 1035 visit.pop()
1036 1036 continue
1037 1037
1038 1038 ready = True
1039 1039 pl = pcache[f]
1040 1040 for p in pl:
1041 1041 if p not in hist:
1042 1042 ready = False
1043 1043 visit.append(p)
1044 1044 if ready:
1045 1045 visit.pop()
1046 1046 curr = decorate(f.data(), f)
1047 curr = _annotatepair([hist[p] for p in pl], curr, diffopts)
1047 1048 for p in pl:
1048 curr = _annotatepair(hist[p], curr, diffopts)
1049 1049 if needed[p] == 1:
1050 1050 del hist[p]
1051 1051 del needed[p]
1052 1052 else:
1053 1053 needed[p] -= 1
1054 1054
1055 1055 hist[f] = curr
1056 1056 del pcache[f]
1057 1057
1058 1058 return zip(hist[base][0], hist[base][1].splitlines(True))
1059 1059
1060 1060 def ancestors(self, followfirst=False):
1061 1061 visit = {}
1062 1062 c = self
1063 1063 if followfirst:
1064 1064 cut = 1
1065 1065 else:
1066 1066 cut = None
1067 1067
1068 1068 while True:
1069 1069 for parent in c.parents()[:cut]:
1070 1070 visit[(parent.linkrev(), parent.filenode())] = parent
1071 1071 if not visit:
1072 1072 break
1073 1073 c = visit.pop(max(visit))
1074 1074 yield c
1075 1075
1076 def _annotatepair(parent, child, diffopts):
1077 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
1078 for (a1, a2, b1, b2), t in blocks:
1079 # Changed blocks ('!') or blocks made only of blank lines ('~')
1080 # belong to the child.
1081 if t == '=':
1082 child[0][b1:b2] = parent[0][a1:a2]
1076 def _annotatepair(parents, child, diffopts):
1077 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1078 for parent in parents]
1079 # Mercurial currently prefers p2 over p1 for annotate.
1080 # TODO: change this?
1081 for parent, blocks in pblocks:
1082 for (a1, a2, b1, b2), t in blocks:
1083 # Changed blocks ('!') or blocks made only of blank lines ('~')
1084 # belong to the child.
1085 if t == '=':
1086 child[0][b1:b2] = parent[0][a1:a2]
1083 1087 return child
1084 1088
1085 1089 class filectx(basefilectx):
1086 1090 """A filecontext object makes access to data related to a particular
1087 1091 filerevision convenient."""
1088 1092 def __init__(self, repo, path, changeid=None, fileid=None,
1089 1093 filelog=None, changectx=None):
1090 1094 """changeid can be a changeset revision, node, or tag.
1091 1095 fileid can be a file revision or node."""
1092 1096 self._repo = repo
1093 1097 self._path = path
1094 1098
1095 1099 assert (changeid is not None
1096 1100 or fileid is not None
1097 1101 or changectx is not None), \
1098 1102 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1099 1103 % (changeid, fileid, changectx))
1100 1104
1101 1105 if filelog is not None:
1102 1106 self._filelog = filelog
1103 1107
1104 1108 if changeid is not None:
1105 1109 self._changeid = changeid
1106 1110 if changectx is not None:
1107 1111 self._changectx = changectx
1108 1112 if fileid is not None:
1109 1113 self._fileid = fileid
1110 1114
1111 1115 @propertycache
1112 1116 def _changectx(self):
1113 1117 try:
1114 1118 return changectx(self._repo, self._changeid)
1115 1119 except error.FilteredRepoLookupError:
1116 1120 # Linkrev may point to any revision in the repository. When the
1117 1121 # repository is filtered this may lead to `filectx` trying to build
1118 1122 # `changectx` for filtered revision. In such case we fallback to
1119 1123 # creating `changectx` on the unfiltered version of the repository.
1120 1124 # This fallback should not be an issue because `changectx` from
1121 1125 # `filectx` are not used in complex operations that care about
1122 1126 # filtering.
1123 1127 #
1124 1128 # This fallback is a cheap and dirty fix that prevents several
1125 1129 # crashes. It does not ensure the behavior is correct. However the
1126 1130 # behavior was not correct before filtering either and "incorrect
1127 1131 # behavior" is seen as better as "crash"
1128 1132 #
1129 1133 # Linkrevs have several serious troubles with filtering that are
1130 1134 # complicated to solve. Proper handling of the issue here should be
1131 1135 # considered when solving the linkrev issues is on the table.
1132 1136 return changectx(self._repo.unfiltered(), self._changeid)
1133 1137
1134 1138 def filectx(self, fileid, changeid=None):
1135 1139 '''opens an arbitrary revision of the file without
1136 1140 opening a new filelog'''
1137 1141 return filectx(self._repo, self._path, fileid=fileid,
1138 1142 filelog=self._filelog, changeid=changeid)
1139 1143
1140 1144 def rawdata(self):
1141 1145 return self._filelog.revision(self._filenode, raw=True)
1142 1146
1143 1147 def rawflags(self):
1144 1148 """low-level revlog flags"""
1145 1149 return self._filelog.flags(self._filerev)
1146 1150
1147 1151 def data(self):
1148 1152 try:
1149 1153 return self._filelog.read(self._filenode)
1150 1154 except error.CensoredNodeError:
1151 1155 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1152 1156 return ""
1153 1157 raise error.Abort(_("censored node: %s") % short(self._filenode),
1154 1158 hint=_("set censor.policy to ignore errors"))
1155 1159
1156 1160 def size(self):
1157 1161 return self._filelog.size(self._filerev)
1158 1162
1159 1163 @propertycache
1160 1164 def _copied(self):
1161 1165 """check if file was actually renamed in this changeset revision
1162 1166
1163 1167 If a rename is logged in the file revision, we report the copy for the changeset only
1164 1168 if the file revision's linkrev points back to the changeset in question
1165 1169 or both changeset parents contain different file revisions.
1166 1170 """
1167 1171
1168 1172 renamed = self._filelog.renamed(self._filenode)
1169 1173 if not renamed:
1170 1174 return renamed
1171 1175
1172 1176 if self.rev() == self.linkrev():
1173 1177 return renamed
1174 1178
1175 1179 name = self.path()
1176 1180 fnode = self._filenode
1177 1181 for p in self._changectx.parents():
1178 1182 try:
1179 1183 if fnode == p.filenode(name):
1180 1184 return None
1181 1185 except error.LookupError:
1182 1186 pass
1183 1187 return renamed
1184 1188
1185 1189 def children(self):
1186 1190 # hard for renames
1187 1191 c = self._filelog.children(self._filenode)
1188 1192 return [filectx(self._repo, self._path, fileid=x,
1189 1193 filelog=self._filelog) for x in c]
1190 1194
1191 1195 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1192 1196 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1193 1197 if diff from fctx2 to fctx1 has changes in linerange2 and
1194 1198 `linerange1` is the new line range for fctx1.
1195 1199 """
1196 1200 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1197 1201 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1198 1202 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1199 1203 return diffinrange, linerange1
1200 1204
1201 1205 def blockancestors(fctx, fromline, toline, followfirst=False):
1202 1206 """Yield ancestors of `fctx` with respect to the block of lines within
1203 1207 `fromline`-`toline` range.
1204 1208 """
1205 1209 diffopts = patch.diffopts(fctx._repo.ui)
1206 1210 introrev = fctx.introrev()
1207 1211 if fctx.rev() != introrev:
1208 1212 fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
1209 1213 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1210 1214 while visit:
1211 1215 c, linerange2 = visit.pop(max(visit))
1212 1216 pl = c.parents()
1213 1217 if followfirst:
1214 1218 pl = pl[:1]
1215 1219 if not pl:
1216 1220 # The block originates from the initial revision.
1217 1221 yield c, linerange2
1218 1222 continue
1219 1223 inrange = False
1220 1224 for p in pl:
1221 1225 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1222 1226 inrange = inrange or inrangep
1223 1227 if linerange1[0] == linerange1[1]:
1224 1228 # Parent's linerange is empty, meaning that the block got
1225 1229 # introduced in this revision; no need to go further in this
1226 1230 # branch.
1227 1231 continue
1228 1232 # Set _descendantrev with 'c' (a known descendant) so that, when
1229 1233 # _adjustlinkrev is called for 'p', it receives this descendant
1230 1234 # (as srcrev) instead of the possibly topmost introrev.
1231 1235 p._descendantrev = c.rev()
1232 1236 visit[p.linkrev(), p.filenode()] = p, linerange1
1233 1237 if inrange:
1234 1238 yield c, linerange2
1235 1239
1236 1240 def blockdescendants(fctx, fromline, toline):
1237 1241 """Yield descendants of `fctx` with respect to the block of lines within
1238 1242 `fromline`-`toline` range.
1239 1243 """
1240 1244 # First possibly yield 'fctx' if it has changes in range with respect to
1241 1245 # its parents.
1242 1246 try:
1243 1247 c, linerange1 = next(blockancestors(fctx, fromline, toline))
1244 1248 except StopIteration:
1245 1249 pass
1246 1250 else:
1247 1251 if c == fctx:
1248 1252 yield c, linerange1
1249 1253
1250 1254 diffopts = patch.diffopts(fctx._repo.ui)
1251 1255 fl = fctx.filelog()
1252 1256 seen = {fctx.filerev(): (fctx, (fromline, toline))}
1253 1257 for i in fl.descendants([fctx.filerev()]):
1254 1258 c = fctx.filectx(i)
1255 1259 inrange = False
1256 1260 for x in fl.parentrevs(i):
1257 1261 try:
1258 1262 p, linerange2 = seen[x]
1259 1263 except KeyError:
1260 1264 # nullrev or other branch
1261 1265 continue
1262 1266 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
1263 1267 inrange = inrange or inrangep
1264 1268 # If revision 'i' has been seen (it's a merge), we assume that its
1265 1269 # line range is the same independently of which parent was used
1266 1270 # to compute it.
1267 1271 assert i not in seen or seen[i][1] == linerange1, (
1268 1272 'computed line range for %s is not consistent between '
1269 1273 'ancestor branches' % c)
1270 1274 seen[i] = c, linerange1
1271 1275 if inrange:
1272 1276 yield c, linerange1
1273 1277
1274 1278 class committablectx(basectx):
1275 1279 """A committablectx object provides common functionality for a context that
1276 1280 wants the ability to commit, e.g. workingctx or memctx."""
1277 1281 def __init__(self, repo, text="", user=None, date=None, extra=None,
1278 1282 changes=None):
1279 1283 self._repo = repo
1280 1284 self._rev = None
1281 1285 self._node = None
1282 1286 self._text = text
1283 1287 if date:
1284 1288 self._date = util.parsedate(date)
1285 1289 if user:
1286 1290 self._user = user
1287 1291 if changes:
1288 1292 self._status = changes
1289 1293
1290 1294 self._extra = {}
1291 1295 if extra:
1292 1296 self._extra = extra.copy()
1293 1297 if 'branch' not in self._extra:
1294 1298 try:
1295 1299 branch = encoding.fromlocal(self._repo.dirstate.branch())
1296 1300 except UnicodeDecodeError:
1297 1301 raise error.Abort(_('branch name not in UTF-8!'))
1298 1302 self._extra['branch'] = branch
1299 1303 if self._extra['branch'] == '':
1300 1304 self._extra['branch'] = 'default'
1301 1305
1302 1306 def __str__(self):
1303 1307 return str(self._parents[0]) + "+"
1304 1308
1305 1309 def __nonzero__(self):
1306 1310 return True
1307 1311
1308 1312 __bool__ = __nonzero__
1309 1313
1310 1314 def _buildflagfunc(self):
1311 1315 # Create a fallback function for getting file flags when the
1312 1316 # filesystem doesn't support them
1313 1317
1314 1318 copiesget = self._repo.dirstate.copies().get
1315 1319 parents = self.parents()
1316 1320 if len(parents) < 2:
1317 1321 # when we have one parent, it's easy: copy from parent
1318 1322 man = parents[0].manifest()
1319 1323 def func(f):
1320 1324 f = copiesget(f, f)
1321 1325 return man.flags(f)
1322 1326 else:
1323 1327 # merges are tricky: we try to reconstruct the unstored
1324 1328 # result from the merge (issue1802)
1325 1329 p1, p2 = parents
1326 1330 pa = p1.ancestor(p2)
1327 1331 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1328 1332
1329 1333 def func(f):
1330 1334 f = copiesget(f, f) # may be wrong for merges with copies
1331 1335 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1332 1336 if fl1 == fl2:
1333 1337 return fl1
1334 1338 if fl1 == fla:
1335 1339 return fl2
1336 1340 if fl2 == fla:
1337 1341 return fl1
1338 1342 return '' # punt for conflicts
1339 1343
1340 1344 return func
1341 1345
1342 1346 @propertycache
1343 1347 def _flagfunc(self):
1344 1348 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1345 1349
1346 1350 @propertycache
1347 1351 def _status(self):
1348 1352 return self._repo.status()
1349 1353
1350 1354 @propertycache
1351 1355 def _user(self):
1352 1356 return self._repo.ui.username()
1353 1357
1354 1358 @propertycache
1355 1359 def _date(self):
1356 1360 ui = self._repo.ui
1357 1361 date = ui.configdate('devel', 'default-date')
1358 1362 if date is None:
1359 1363 date = util.makedate()
1360 1364 return date
1361 1365
1362 1366 def subrev(self, subpath):
1363 1367 return None
1364 1368
1365 1369 def manifestnode(self):
1366 1370 return None
1367 1371 def user(self):
1368 1372 return self._user or self._repo.ui.username()
1369 1373 def date(self):
1370 1374 return self._date
1371 1375 def description(self):
1372 1376 return self._text
1373 1377 def files(self):
1374 1378 return sorted(self._status.modified + self._status.added +
1375 1379 self._status.removed)
1376 1380
1377 1381 def modified(self):
1378 1382 return self._status.modified
1379 1383 def added(self):
1380 1384 return self._status.added
1381 1385 def removed(self):
1382 1386 return self._status.removed
1383 1387 def deleted(self):
1384 1388 return self._status.deleted
1385 1389 def branch(self):
1386 1390 return encoding.tolocal(self._extra['branch'])
1387 1391 def closesbranch(self):
1388 1392 return 'close' in self._extra
1389 1393 def extra(self):
1390 1394 return self._extra
1391 1395
1392 1396 def tags(self):
1393 1397 return []
1394 1398
1395 1399 def bookmarks(self):
1396 1400 b = []
1397 1401 for p in self.parents():
1398 1402 b.extend(p.bookmarks())
1399 1403 return b
1400 1404
1401 1405 def phase(self):
1402 1406 phase = phases.draft # default phase to draft
1403 1407 for p in self.parents():
1404 1408 phase = max(phase, p.phase())
1405 1409 return phase
1406 1410
1407 1411 def hidden(self):
1408 1412 return False
1409 1413
1410 1414 def children(self):
1411 1415 return []
1412 1416
1413 1417 def flags(self, path):
1414 1418 if r'_manifest' in self.__dict__:
1415 1419 try:
1416 1420 return self._manifest.flags(path)
1417 1421 except KeyError:
1418 1422 return ''
1419 1423
1420 1424 try:
1421 1425 return self._flagfunc(path)
1422 1426 except OSError:
1423 1427 return ''
1424 1428
1425 1429 def ancestor(self, c2):
1426 1430 """return the "best" ancestor context of self and c2"""
1427 1431 return self._parents[0].ancestor(c2) # punt on two parents for now
1428 1432
1429 1433 def walk(self, match):
1430 1434 '''Generates matching file names.'''
1431 1435 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1432 1436 True, False))
1433 1437
1434 1438 def matches(self, match):
1435 1439 return sorted(self._repo.dirstate.matches(match))
1436 1440
1437 1441 def ancestors(self):
1438 1442 for p in self._parents:
1439 1443 yield p
1440 1444 for a in self._repo.changelog.ancestors(
1441 1445 [p.rev() for p in self._parents]):
1442 1446 yield changectx(self._repo, a)
1443 1447
1444 1448 def markcommitted(self, node):
1445 1449 """Perform post-commit cleanup necessary after committing this ctx
1446 1450
1447 1451 Specifically, this updates backing stores this working context
1448 1452 wraps to reflect the fact that the changes reflected by this
1449 1453 workingctx have been committed. For example, it marks
1450 1454 modified and added files as normal in the dirstate.
1451 1455
1452 1456 """
1453 1457
1454 1458 with self._repo.dirstate.parentchange():
1455 1459 for f in self.modified() + self.added():
1456 1460 self._repo.dirstate.normal(f)
1457 1461 for f in self.removed():
1458 1462 self._repo.dirstate.drop(f)
1459 1463 self._repo.dirstate.setparents(node)
1460 1464
1461 1465 # write changes out explicitly, because nesting wlock at
1462 1466 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1463 1467 # from immediately doing so for subsequent changing files
1464 1468 self._repo.dirstate.write(self._repo.currenttransaction())
1465 1469
1466 1470 class workingctx(committablectx):
1467 1471 """A workingctx object makes access to data related to
1468 1472 the current working directory convenient.
1469 1473 date - any valid date string or (unixtime, offset), or None.
1470 1474 user - username string, or None.
1471 1475 extra - a dictionary of extra values, or None.
1472 1476 changes - a list of file lists as returned by localrepo.status()
1473 1477 or None to use the repository status.
1474 1478 """
1475 1479 def __init__(self, repo, text="", user=None, date=None, extra=None,
1476 1480 changes=None):
1477 1481 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1478 1482
1479 1483 def __iter__(self):
1480 1484 d = self._repo.dirstate
1481 1485 for f in d:
1482 1486 if d[f] != 'r':
1483 1487 yield f
1484 1488
1485 1489 def __contains__(self, key):
1486 1490 return self._repo.dirstate[key] not in "?r"
1487 1491
1488 1492 def hex(self):
1489 1493 return hex(wdirid)
1490 1494
1491 1495 @propertycache
1492 1496 def _parents(self):
1493 1497 p = self._repo.dirstate.parents()
1494 1498 if p[1] == nullid:
1495 1499 p = p[:-1]
1496 1500 return [changectx(self._repo, x) for x in p]
1497 1501
1498 1502 def filectx(self, path, filelog=None):
1499 1503 """get a file context from the working directory"""
1500 1504 return workingfilectx(self._repo, path, workingctx=self,
1501 1505 filelog=filelog)
1502 1506
1503 1507 def dirty(self, missing=False, merge=True, branch=True):
1504 1508 "check whether a working directory is modified"
1505 1509 # check subrepos first
1506 1510 for s in sorted(self.substate):
1507 1511 if self.sub(s).dirty():
1508 1512 return True
1509 1513 # check current working dir
1510 1514 return ((merge and self.p2()) or
1511 1515 (branch and self.branch() != self.p1().branch()) or
1512 1516 self.modified() or self.added() or self.removed() or
1513 1517 (missing and self.deleted()))
1514 1518
1515 1519 def add(self, list, prefix=""):
1516 1520 join = lambda f: os.path.join(prefix, f)
1517 1521 with self._repo.wlock():
1518 1522 ui, ds = self._repo.ui, self._repo.dirstate
1519 1523 rejected = []
1520 1524 lstat = self._repo.wvfs.lstat
1521 1525 for f in list:
1522 1526 scmutil.checkportable(ui, join(f))
1523 1527 try:
1524 1528 st = lstat(f)
1525 1529 except OSError:
1526 1530 ui.warn(_("%s does not exist!\n") % join(f))
1527 1531 rejected.append(f)
1528 1532 continue
1529 1533 if st.st_size > 10000000:
1530 1534 ui.warn(_("%s: up to %d MB of RAM may be required "
1531 1535 "to manage this file\n"
1532 1536 "(use 'hg revert %s' to cancel the "
1533 1537 "pending addition)\n")
1534 1538 % (f, 3 * st.st_size // 1000000, join(f)))
1535 1539 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1536 1540 ui.warn(_("%s not added: only files and symlinks "
1537 1541 "supported currently\n") % join(f))
1538 1542 rejected.append(f)
1539 1543 elif ds[f] in 'amn':
1540 1544 ui.warn(_("%s already tracked!\n") % join(f))
1541 1545 elif ds[f] == 'r':
1542 1546 ds.normallookup(f)
1543 1547 else:
1544 1548 ds.add(f)
1545 1549 return rejected
1546 1550
1547 1551 def forget(self, files, prefix=""):
1548 1552 join = lambda f: os.path.join(prefix, f)
1549 1553 with self._repo.wlock():
1550 1554 rejected = []
1551 1555 for f in files:
1552 1556 if f not in self._repo.dirstate:
1553 1557 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1554 1558 rejected.append(f)
1555 1559 elif self._repo.dirstate[f] != 'a':
1556 1560 self._repo.dirstate.remove(f)
1557 1561 else:
1558 1562 self._repo.dirstate.drop(f)
1559 1563 return rejected
1560 1564
1561 1565 def undelete(self, list):
1562 1566 pctxs = self.parents()
1563 1567 with self._repo.wlock():
1564 1568 for f in list:
1565 1569 if self._repo.dirstate[f] != 'r':
1566 1570 self._repo.ui.warn(_("%s not removed!\n") % f)
1567 1571 else:
1568 1572 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1569 1573 t = fctx.data()
1570 1574 self._repo.wwrite(f, t, fctx.flags())
1571 1575 self._repo.dirstate.normal(f)
1572 1576
1573 1577 def copy(self, source, dest):
1574 1578 try:
1575 1579 st = self._repo.wvfs.lstat(dest)
1576 1580 except OSError as err:
1577 1581 if err.errno != errno.ENOENT:
1578 1582 raise
1579 1583 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1580 1584 return
1581 1585 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1582 1586 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1583 1587 "symbolic link\n") % dest)
1584 1588 else:
1585 1589 with self._repo.wlock():
1586 1590 if self._repo.dirstate[dest] in '?':
1587 1591 self._repo.dirstate.add(dest)
1588 1592 elif self._repo.dirstate[dest] in 'r':
1589 1593 self._repo.dirstate.normallookup(dest)
1590 1594 self._repo.dirstate.copy(source, dest)
1591 1595
1592 1596 def match(self, pats=None, include=None, exclude=None, default='glob',
1593 1597 listsubrepos=False, badfn=None):
1594 1598 if pats is None:
1595 1599 pats = []
1596 1600 r = self._repo
1597 1601
1598 1602 # Only a case insensitive filesystem needs magic to translate user input
1599 1603 # to actual case in the filesystem.
1600 1604 icasefs = not util.fscasesensitive(r.root)
1601 1605 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1602 1606 default, auditor=r.auditor, ctx=self,
1603 1607 listsubrepos=listsubrepos, badfn=badfn,
1604 1608 icasefs=icasefs)
1605 1609
1606 1610 def _filtersuspectsymlink(self, files):
1607 1611 if not files or self._repo.dirstate._checklink:
1608 1612 return files
1609 1613
1610 1614 # Symlink placeholders may get non-symlink-like contents
1611 1615 # via user error or dereferencing by NFS or Samba servers,
1612 1616 # so we filter out any placeholders that don't look like a
1613 1617 # symlink
1614 1618 sane = []
1615 1619 for f in files:
1616 1620 if self.flags(f) == 'l':
1617 1621 d = self[f].data()
1618 1622 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1619 1623 self._repo.ui.debug('ignoring suspect symlink placeholder'
1620 1624 ' "%s"\n' % f)
1621 1625 continue
1622 1626 sane.append(f)
1623 1627 return sane
1624 1628
1625 1629 def _checklookup(self, files):
1626 1630 # check for any possibly clean files
1627 1631 if not files:
1628 1632 return [], []
1629 1633
1630 1634 modified = []
1631 1635 fixup = []
1632 1636 pctx = self._parents[0]
1633 1637 # do a full compare of any files that might have changed
1634 1638 for f in sorted(files):
1635 1639 if (f not in pctx or self.flags(f) != pctx.flags(f)
1636 1640 or pctx[f].cmp(self[f])):
1637 1641 modified.append(f)
1638 1642 else:
1639 1643 fixup.append(f)
1640 1644
1641 1645 # update dirstate for files that are actually clean
1642 1646 if fixup:
1643 1647 try:
1644 1648 # updating the dirstate is optional
1645 1649 # so we don't wait on the lock
1646 1650 # wlock can invalidate the dirstate, so cache normal _after_
1647 1651 # taking the lock
1648 1652 with self._repo.wlock(False):
1649 1653 normal = self._repo.dirstate.normal
1650 1654 for f in fixup:
1651 1655 normal(f)
1652 1656 # write changes out explicitly, because nesting
1653 1657 # wlock at runtime may prevent 'wlock.release()'
1654 1658 # after this block from doing so for subsequent
1655 1659 # changing files
1656 1660 self._repo.dirstate.write(self._repo.currenttransaction())
1657 1661 except error.LockError:
1658 1662 pass
1659 1663 return modified, fixup
1660 1664
1661 1665 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1662 1666 unknown=False):
1663 1667 '''Gets the status from the dirstate -- internal use only.'''
1664 1668 listignored, listclean, listunknown = ignored, clean, unknown
1665 1669 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1666 1670 subrepos = []
1667 1671 if '.hgsub' in self:
1668 1672 subrepos = sorted(self.substate)
1669 1673 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1670 1674 listclean, listunknown)
1671 1675
1672 1676 # check for any possibly clean files
1673 1677 if cmp:
1674 1678 modified2, fixup = self._checklookup(cmp)
1675 1679 s.modified.extend(modified2)
1676 1680
1677 1681 # update dirstate for files that are actually clean
1678 1682 if fixup and listclean:
1679 1683 s.clean.extend(fixup)
1680 1684
1681 1685 if match.always():
1682 1686 # cache for performance
1683 1687 if s.unknown or s.ignored or s.clean:
1684 1688 # "_status" is cached with list*=False in the normal route
1685 1689 self._status = scmutil.status(s.modified, s.added, s.removed,
1686 1690 s.deleted, [], [], [])
1687 1691 else:
1688 1692 self._status = s
1689 1693
1690 1694 return s
1691 1695
1692 1696 @propertycache
1693 1697 def _manifest(self):
1694 1698 """generate a manifest corresponding to the values in self._status
1695 1699
1696 1700 This reuses the file nodeids from the parent, but we use special node
1697 1701 identifiers for added and modified files. This is used by the manifest
1698 1702 merge to see that files are different and by update logic to avoid
1699 1703 deleting newly added files.
1700 1704 """
1701 1705 return self._buildstatusmanifest(self._status)
1702 1706
1703 1707 def _buildstatusmanifest(self, status):
1704 1708 """Builds a manifest that includes the given status results."""
1705 1709 parents = self.parents()
1706 1710
1707 1711 man = parents[0].manifest().copy()
1708 1712
1709 1713 ff = self._flagfunc
1710 1714 for i, l in ((addednodeid, status.added),
1711 1715 (modifiednodeid, status.modified)):
1712 1716 for f in l:
1713 1717 man[f] = i
1714 1718 try:
1715 1719 man.setflag(f, ff(f))
1716 1720 except OSError:
1717 1721 pass
1718 1722
1719 1723 for f in status.deleted + status.removed:
1720 1724 if f in man:
1721 1725 del man[f]
1722 1726
1723 1727 return man
1724 1728
1725 1729 def _buildstatus(self, other, s, match, listignored, listclean,
1726 1730 listunknown):
1727 1731 """build a status with respect to another context
1728 1732
1729 1733 This includes logic for maintaining the fast path of status when
1730 1734 comparing the working directory against its parent: a new manifest is
1731 1735 only built when self (the working directory) is compared against
1732 1736 something other than its parent (repo['.']).
1733 1737 """
1734 1738 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1735 1739 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1736 1740 # might have accidentally ended up with the entire contents of the file
1737 1741 # they are supposed to be linking to.
1738 1742 s.modified[:] = self._filtersuspectsymlink(s.modified)
1739 1743 if other != self._repo['.']:
1740 1744 s = super(workingctx, self)._buildstatus(other, s, match,
1741 1745 listignored, listclean,
1742 1746 listunknown)
1743 1747 return s
1744 1748
1745 1749 def _matchstatus(self, other, match):
1746 1750 """override the match method with a filter for directory patterns
1747 1751
1748 1752 We use inheritance to customize the match.bad method only in cases of
1749 1753 workingctx since it belongs only to the working directory when
1750 1754 comparing against the parent changeset.
1751 1755
1752 1756 If we are comparing against the working directory's parent (repo['.']),
1753 1757 then we just use the default match object sent to us.
1754 1758 """
1755 1759 superself = super(workingctx, self)
1756 1760 match = superself._matchstatus(other, match)
1757 1761 if other != self._repo['.']:
1758 1762 def bad(f, msg):
1759 1763 # 'f' may be a directory pattern from 'match.files()',
1760 1764 # so 'f not in ctx1' is not enough
1761 1765 if f not in other and not other.hasdir(f):
1762 1766 self._repo.ui.warn('%s: %s\n' %
1763 1767 (self._repo.dirstate.pathto(f), msg))
1764 1768 match.bad = bad
1765 1769 return match
1766 1770
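# Illustrative sketch, not part of the original module: one plausible way a
# caller could drive the workingctx mutation API defined above, assuming an
# existing 'repo' object and files that already exist in the working directory.
def _workingctx_usage_sketch(repo):
    wctx = repo[None]                      # the workingctx for the working dir
    rejected = wctx.add(['newfile.txt'])   # schedule an add; returns rejected paths
    wctx.copy('newfile.txt', 'copy.txt')   # record copy metadata (copy.txt must exist)
    wctx.forget(['oldfile.txt'])           # stop tracking a file, keep it on disk
    return wctx.dirty(missing=True), rejected
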
1767 1771 class committablefilectx(basefilectx):
1768 1772 """A committablefilectx provides common functionality for a file context
1769 1773 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1770 1774 def __init__(self, repo, path, filelog=None, ctx=None):
1771 1775 self._repo = repo
1772 1776 self._path = path
1773 1777 self._changeid = None
1774 1778 self._filerev = self._filenode = None
1775 1779
1776 1780 if filelog is not None:
1777 1781 self._filelog = filelog
1778 1782 if ctx:
1779 1783 self._changectx = ctx
1780 1784
1781 1785 def __nonzero__(self):
1782 1786 return True
1783 1787
1784 1788 __bool__ = __nonzero__
1785 1789
1786 1790 def linkrev(self):
1787 1791 # linked to self._changectx no matter if file is modified or not
1788 1792 return self.rev()
1789 1793
1790 1794 def parents(self):
1791 1795 '''return parent filectxs, following copies if necessary'''
1792 1796 def filenode(ctx, path):
1793 1797 return ctx._manifest.get(path, nullid)
1794 1798
1795 1799 path = self._path
1796 1800 fl = self._filelog
1797 1801 pcl = self._changectx._parents
1798 1802 renamed = self.renamed()
1799 1803
1800 1804 if renamed:
1801 1805 pl = [renamed + (None,)]
1802 1806 else:
1803 1807 pl = [(path, filenode(pcl[0], path), fl)]
1804 1808
1805 1809 for pc in pcl[1:]:
1806 1810 pl.append((path, filenode(pc, path), fl))
1807 1811
1808 1812 return [self._parentfilectx(p, fileid=n, filelog=l)
1809 1813 for p, n, l in pl if n != nullid]
1810 1814
1811 1815 def children(self):
1812 1816 return []
1813 1817
1814 1818 class workingfilectx(committablefilectx):
1815 1819 """A workingfilectx object makes access to data related to a particular
1816 1820 file in the working directory convenient."""
1817 1821 def __init__(self, repo, path, filelog=None, workingctx=None):
1818 1822 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1819 1823
1820 1824 @propertycache
1821 1825 def _changectx(self):
1822 1826 return workingctx(self._repo)
1823 1827
1824 1828 def data(self):
1825 1829 return self._repo.wread(self._path)
1826 1830 def renamed(self):
1827 1831 rp = self._repo.dirstate.copied(self._path)
1828 1832 if not rp:
1829 1833 return None
1830 1834 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1831 1835
1832 1836 def size(self):
1833 1837 return self._repo.wvfs.lstat(self._path).st_size
1834 1838 def date(self):
1835 1839 t, tz = self._changectx.date()
1836 1840 try:
1837 1841 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1838 1842 except OSError as err:
1839 1843 if err.errno != errno.ENOENT:
1840 1844 raise
1841 1845 return (t, tz)
1842 1846
1843 1847 def cmp(self, fctx):
1844 1848 """compare with other file context
1845 1849
1846 1850 returns True if different from fctx.
1847 1851 """
1848 1852 # fctx should be a filectx (not a workingfilectx)
1849 1853 # invert comparison to reuse the same code path
1850 1854 return fctx.cmp(self)
1851 1855
1852 1856 def remove(self, ignoremissing=False):
1853 1857 """wraps unlink for a repo's working directory"""
1854 1858 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1855 1859
1856 1860 def write(self, data, flags):
1857 1861 """wraps repo.wwrite"""
1858 1862 self._repo.wwrite(self._path, data, flags)
1859 1863
1860 1864 class workingcommitctx(workingctx):
1861 1865 """A workingcommitctx object makes access to data related to
1862 1866 the revision being committed convenient.
1863 1867
1864 1868 This hides changes in the working directory, if they aren't
1865 1869 committed in this context.
1866 1870 """
1867 1871 def __init__(self, repo, changes,
1868 1872 text="", user=None, date=None, extra=None):
1869 1873 super(workingctx, self).__init__(repo, text, user, date, extra,
1870 1874 changes)
1871 1875
1872 1876 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1873 1877 unknown=False):
1874 1878 """Return matched files only in ``self._status``
1875 1879
1876 1880 Uncommitted files appear "clean" via this context, even if
1877 1881 they aren't actually so in the working directory.
1878 1882 """
1879 1883 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1880 1884 if clean:
1881 1885 clean = [f for f in self._manifest if f not in self._changedset]
1882 1886 else:
1883 1887 clean = []
1884 1888 return scmutil.status([f for f in self._status.modified if match(f)],
1885 1889 [f for f in self._status.added if match(f)],
1886 1890 [f for f in self._status.removed if match(f)],
1887 1891 [], [], [], clean)
1888 1892
1889 1893 @propertycache
1890 1894 def _changedset(self):
1891 1895 """Return the set of files changed in this context
1892 1896 """
1893 1897 changed = set(self._status.modified)
1894 1898 changed.update(self._status.added)
1895 1899 changed.update(self._status.removed)
1896 1900 return changed
1897 1901
1898 1902 def makecachingfilectxfn(func):
1899 1903 """Create a filectxfn that caches based on the path.
1900 1904
1901 1905 We can't use util.cachefunc because it uses all arguments as the cache
1902 1906 key and this creates a cycle since the arguments include the repo and
1903 1907 memctx.
1904 1908 """
1905 1909 cache = {}
1906 1910
1907 1911 def getfilectx(repo, memctx, path):
1908 1912 if path not in cache:
1909 1913 cache[path] = func(repo, memctx, path)
1910 1914 return cache[path]
1911 1915
1912 1916 return getfilectx
1913 1917
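# Illustrative sketch, not part of the original module: wrapping an expensive
# filectxfn-style callback with makecachingfilectxfn so repeated lookups of the
# same path only invoke the callback once. The callback and paths are made up.
def _cachingfilectxfn_sketch(repo, memctx_instance):
    def expensivefilectxfn(repo, memctx, path):
        # pretend this converts data from a slow external source
        return memfilectx(repo, path, data='converted contents\n', memctx=memctx)
    cached = makecachingfilectxfn(expensivefilectxfn)
    first = cached(repo, memctx_instance, 'a.txt')    # computed and cached
    second = cached(repo, memctx_instance, 'a.txt')   # served from the cache
    return first is second                            # True: same cached object
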
1914 1918 class memctx(committablectx):
1915 1919 """Use memctx to perform in-memory commits via localrepo.commitctx().
1916 1920
1917 1921 Revision information is supplied at initialization time, while
1918 1922 related file data is made available through a callback
1919 1923 mechanism. 'repo' is the current localrepo, 'parents' is a
1920 1924 sequence of two parent revision identifiers (pass None for every
1921 1925 missing parent), 'text' is the commit message and 'files' lists
1922 1926 names of files touched by the revision (normalized and relative to
1923 1927 repository root).
1924 1928
1925 1929 filectxfn(repo, memctx, path) is a callable receiving the
1926 1930 repository, the current memctx object and the normalized path of
1927 1931 requested file, relative to repository root. It is fired by the
1928 1932 commit function for every file in 'files', but calls order is
1929 1933 undefined. If the file is available in the revision being
1930 1934 committed (updated or added), filectxfn returns a memfilectx
1931 1935 object. If the file was removed, filectxfn return None for recent
1932 1936 Mercurial. Moved files are represented by marking the source file
1933 1937 removed and the new file added with copy information (see
1934 1938 memfilectx).
1935 1939
1936 1940 user receives the committer name and defaults to current
1937 1941 repository username, date is the commit date in any format
1938 1942 supported by util.parsedate() and defaults to current date, extra
1939 1943 is a dictionary of metadata or is left empty.
1940 1944 """
1941 1945
1942 1946 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1943 1947 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1944 1948 # this field to determine what to do in filectxfn.
1945 1949 _returnnoneformissingfiles = True
1946 1950
1947 1951 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1948 1952 date=None, extra=None, editor=False):
1949 1953 super(memctx, self).__init__(repo, text, user, date, extra)
1950 1954 self._rev = None
1951 1955 self._node = None
1952 1956 parents = [(p or nullid) for p in parents]
1953 1957 p1, p2 = parents
1954 1958 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1955 1959 files = sorted(set(files))
1956 1960 self._files = files
1957 1961 self.substate = {}
1958 1962
1959 1963 # if store is not callable, wrap it in a function
1960 1964 if not callable(filectxfn):
1961 1965 def getfilectx(repo, memctx, path):
1962 1966 fctx = filectxfn[path]
1963 1967 # this is weird but apparently we only keep track of one parent
1964 1968 # (why not only store that instead of a tuple?)
1965 1969 copied = fctx.renamed()
1966 1970 if copied:
1967 1971 copied = copied[0]
1968 1972 return memfilectx(repo, path, fctx.data(),
1969 1973 islink=fctx.islink(), isexec=fctx.isexec(),
1970 1974 copied=copied, memctx=memctx)
1971 1975 self._filectxfn = getfilectx
1972 1976 else:
1973 1977 # memoizing increases performance for e.g. vcs convert scenarios.
1974 1978 self._filectxfn = makecachingfilectxfn(filectxfn)
1975 1979
1976 1980 if extra:
1977 1981 self._extra = extra.copy()
1978 1982 else:
1979 1983 self._extra = {}
1980 1984
1981 1985 if self._extra.get('branch', '') == '':
1982 1986 self._extra['branch'] = 'default'
1983 1987
1984 1988 if editor:
1985 1989 self._text = editor(self._repo, self, [])
1986 1990 self._repo.savecommitmessage(self._text)
1987 1991
1988 1992 def filectx(self, path, filelog=None):
1989 1993 """get a file context from the working directory
1990 1994
1991 1995 Returns None if file doesn't exist and should be removed."""
1992 1996 return self._filectxfn(self._repo, self, path)
1993 1997
1994 1998 def commit(self):
1995 1999 """commit context to the repo"""
1996 2000 return self._repo.commitctx(self)
1997 2001
1998 2002 @propertycache
1999 2003 def _manifest(self):
2000 2004 """generate a manifest based on the return values of filectxfn"""
2001 2005
2002 2006 # keep this simple for now; just worry about p1
2003 2007 pctx = self._parents[0]
2004 2008 man = pctx.manifest().copy()
2005 2009
2006 2010 for f in self._status.modified:
2007 2011 p1node = nullid
2008 2012 p2node = nullid
2009 2013 p = pctx[f].parents() # if file isn't in pctx, check p2?
2010 2014 if len(p) > 0:
2011 2015 p1node = p[0].filenode()
2012 2016 if len(p) > 1:
2013 2017 p2node = p[1].filenode()
2014 2018 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2015 2019
2016 2020 for f in self._status.added:
2017 2021 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2018 2022
2019 2023 for f in self._status.removed:
2020 2024 if f in man:
2021 2025 del man[f]
2022 2026
2023 2027 return man
2024 2028
2025 2029 @propertycache
2026 2030 def _status(self):
2027 2031 """Calculate exact status from ``files`` specified at construction
2028 2032 """
2029 2033 man1 = self.p1().manifest()
2030 2034 p2 = self._parents[1]
2031 2035 # "1 < len(self._parents)" can't be used for checking
2032 2036 # existence of the 2nd parent, because "memctx._parents" is
2033 2037 # explicitly initialized with a list whose length is always 2.
2034 2038 if p2.node() != nullid:
2035 2039 man2 = p2.manifest()
2036 2040 managing = lambda f: f in man1 or f in man2
2037 2041 else:
2038 2042 managing = lambda f: f in man1
2039 2043
2040 2044 modified, added, removed = [], [], []
2041 2045 for f in self._files:
2042 2046 if not managing(f):
2043 2047 added.append(f)
2044 2048 elif self[f]:
2045 2049 modified.append(f)
2046 2050 else:
2047 2051 removed.append(f)
2048 2052
2049 2053 return scmutil.status(modified, added, removed, [], [], [], [])
2050 2054
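# Illustrative sketch, not part of the original module: a minimal in-memory
# commit built with memctx/memfilectx as described in the class docstring.
# The file name, contents, and commit message are made up for the example.
def _memctx_usage_sketch(repo):
    def getfilectx(repo, memctx, path):
        # returning None here would mark 'path' as removed in the new commit
        return memfilectx(repo, path, data='hello\n', islink=False,
                          isexec=False, memctx=memctx)
    ctx = memctx(repo, parents=(repo['.'].node(), None),
                 text='example in-memory commit',
                 files=['hello.txt'], filectxfn=getfilectx,
                 user='example user <user@example.com>')
    return ctx.commit()   # delegates to repo.commitctx() and returns the new node
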
2051 2055 class memfilectx(committablefilectx):
2052 2056 """memfilectx represents an in-memory file to commit.
2053 2057
2054 2058 See memctx and committablefilectx for more details.
2055 2059 """
2056 2060 def __init__(self, repo, path, data, islink=False,
2057 2061 isexec=False, copied=None, memctx=None):
2058 2062 """
2059 2063 path is the normalized file path relative to repository root.
2060 2064 data is the file content as a string.
2061 2065 islink is True if the file is a symbolic link.
2062 2066 isexec is True if the file is executable.
2063 2067 copied is the source file path if current file was copied in the
2064 2068 revision being committed, or None."""
2065 2069 super(memfilectx, self).__init__(repo, path, None, memctx)
2066 2070 self._data = data
2067 2071 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2068 2072 self._copied = None
2069 2073 if copied:
2070 2074 self._copied = (copied, nullid)
2071 2075
2072 2076 def data(self):
2073 2077 return self._data
2074 2078
2075 2079 def remove(self, ignoremissing=False):
2076 2080 """wraps unlink for a repo's working directory"""
2077 2081 # need to figure out what to do here
2078 2082 del self._changectx[self._path]
2079 2083
2080 2084 def write(self, data, flags):
2081 2085 """wraps repo.wwrite"""
2082 2086 self._data = data
2083 2087
2084 2088 class overlayfilectx(committablefilectx):
2085 2089 """Like memfilectx but take an original filectx and optional parameters to
2086 2090 override parts of it. This is useful when fctx.data() is expensive (i.e.
2087 2091 flag processor is expensive) and raw data, flags, and filenode could be
2088 2092 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2089 2093 """
2090 2094
2091 2095 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2092 2096 copied=None, ctx=None):
2093 2097 """originalfctx: filecontext to duplicate
2094 2098
2095 2099 datafunc: None or a function that overrides the data (file content); it is
2096 2100 a function so it can be lazy. path, flags, copied, ctx: None or an overridden value
2097 2101
2098 2102 copied could be (path, rev), or False. copied could also be just path,
2099 2103 and will be converted to (path, nullid). This simplifies some callers.
2100 2104 """
2101 2105
2102 2106 if path is None:
2103 2107 path = originalfctx.path()
2104 2108 if ctx is None:
2105 2109 ctx = originalfctx.changectx()
2106 2110 ctxmatch = lambda: True
2107 2111 else:
2108 2112 ctxmatch = lambda: ctx == originalfctx.changectx()
2109 2113
2110 2114 repo = originalfctx.repo()
2111 2115 flog = originalfctx.filelog()
2112 2116 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2113 2117
2114 2118 if copied is None:
2115 2119 copied = originalfctx.renamed()
2116 2120 copiedmatch = lambda: True
2117 2121 else:
2118 2122 if copied and not isinstance(copied, tuple):
2119 2123 # repo._filecommit will recalculate copyrev so nullid is okay
2120 2124 copied = (copied, nullid)
2121 2125 copiedmatch = lambda: copied == originalfctx.renamed()
2122 2126
2123 2127 # When data, copied (could affect data), ctx (could affect filelog
2124 2128 # parents) are not overridden, rawdata, rawflags, and filenode may be
2125 2129 # reused (repo._filecommit should double check filelog parents).
2126 2130 #
2127 2131 # path, flags are not hashed in filelog (but in manifestlog) so they do
2128 2132 # not affect reusability here.
2129 2133 #
2130 2134 # If ctx or copied is overridden to the same value as in originalfctx,
2131 2135 # it is still considered reusable. originalfctx.renamed() may be a bit
2132 2136 # expensive so it's not called unless necessary. Assuming datafunc is
2133 2137 # always expensive, do not call it for this "reusable" test.
2134 2138 reusable = datafunc is None and ctxmatch() and copiedmatch()
2135 2139
2136 2140 if datafunc is None:
2137 2141 datafunc = originalfctx.data
2138 2142 if flags is None:
2139 2143 flags = originalfctx.flags()
2140 2144
2141 2145 self._datafunc = datafunc
2142 2146 self._flags = flags
2143 2147 self._copied = copied
2144 2148
2145 2149 if reusable:
2146 2150 # copy extra fields from originalfctx
2147 2151 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2148 2152 for attr in attrs:
2149 2153 if util.safehasattr(originalfctx, attr):
2150 2154 setattr(self, attr, getattr(originalfctx, attr))
2151 2155
2152 2156 def data(self):
2153 2157 return self._datafunc()
2154 2158
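# Illustrative sketch, not part of the original module: reusing an existing
# filectx while overriding only its flags, the kind of cheap rewrite
# overlayfilectx is intended for. 'fctx' is assumed to be some filectx.
def _overlayfilectx_sketch(fctx):
    # same path, data and copy source as fctx, but with the executable bit set;
    # since data, ctx and copied are untouched, rawdata/filenode stay reusable
    return overlayfilectx(fctx, flags='x')
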
2155 2159 class metadataonlyctx(committablectx):
2156 2160 """Like memctx but it's reusing the manifest of different commit.
2157 2161 Intended to be used by lightweight operations that are creating
2158 2162 metadata-only changes.
2159 2163
2160 2164 Revision information is supplied at initialization time. 'repo' is the
2161 2165 current localrepo, 'ctx' is the original revision whose manifest we're
2162 2166 reusing, 'parents' is a sequence of two parent revision identifiers (pass
2163 2167 None for every missing parent), and 'text' is the commit message.
2164 2168
2165 2169 user receives the committer name and defaults to current repository
2166 2170 username, date is the commit date in any format supported by
2167 2171 util.parsedate() and defaults to current date, extra is a dictionary of
2168 2172 metadata or is left empty.
2169 2173 """
2170 2174 def __new__(cls, repo, originalctx, *args, **kwargs):
2171 2175 return super(metadataonlyctx, cls).__new__(cls, repo)
2172 2176
2173 2177 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2174 2178 extra=None, editor=False):
2175 2179 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2176 2180 self._rev = None
2177 2181 self._node = None
2178 2182 self._originalctx = originalctx
2179 2183 self._manifestnode = originalctx.manifestnode()
2180 2184 parents = [(p or nullid) for p in parents]
2181 2185 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2182 2186
2183 2187 # sanity check to ensure that the reused manifest parents are
2184 2188 # manifests of our commit parents
2185 2189 mp1, mp2 = self.manifestctx().parents
2186 2190 if p1.node() != nullid and p1.manifestnode() != mp1:
2187 2191 raise RuntimeError('can\'t reuse the manifest: '
2188 2192 'its p1 doesn\'t match the new ctx p1')
2189 2193 if p2.node() != nullid and p2.manifestnode() != mp2:
2190 2194 raise RuntimeError('can\'t reuse the manifest: '
2191 2195 'its p2 doesn\'t match the new ctx p2')
2192 2196
2193 2197 self._files = originalctx.files()
2194 2198 self.substate = {}
2195 2199
2196 2200 if extra:
2197 2201 self._extra = extra.copy()
2198 2202 else:
2199 2203 self._extra = {}
2200 2204
2201 2205 if self._extra.get('branch', '') == '':
2202 2206 self._extra['branch'] = 'default'
2203 2207
2204 2208 if editor:
2205 2209 self._text = editor(self._repo, self, [])
2206 2210 self._repo.savecommitmessage(self._text)
2207 2211
2208 2212 def manifestnode(self):
2209 2213 return self._manifestnode
2210 2214
2211 2215 @propertycache
2212 2216 def _manifestctx(self):
2213 2217 return self._repo.manifestlog[self._manifestnode]
2214 2218
2215 2219 def filectx(self, path, filelog=None):
2216 2220 return self._originalctx.filectx(path, filelog=filelog)
2217 2221
2218 2222 def commit(self):
2219 2223 """commit context to the repo"""
2220 2224 return self._repo.commitctx(self)
2221 2225
2222 2226 @property
2223 2227 def _manifest(self):
2224 2228 return self._originalctx.manifest()
2225 2229
2226 2230 @propertycache
2227 2231 def _status(self):
2228 2232 """Calculate exact status from ``files`` specified in the ``origctx``
2229 2233 and parents manifests.
2230 2234 """
2231 2235 man1 = self.p1().manifest()
2232 2236 p2 = self._parents[1]
2233 2237 # "1 < len(self._parents)" can't be used for checking
2234 2238 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2235 2239 # explicitly initialized with a list whose length is always 2.
2236 2240 if p2.node() != nullid:
2237 2241 man2 = p2.manifest()
2238 2242 managing = lambda f: f in man1 or f in man2
2239 2243 else:
2240 2244 managing = lambda f: f in man1
2241 2245
2242 2246 modified, added, removed = [], [], []
2243 2247 for f in self._files:
2244 2248 if not managing(f):
2245 2249 added.append(f)
2246 2250 elif self[f]:
2247 2251 modified.append(f)
2248 2252 else:
2249 2253 removed.append(f)
2250 2254
2251 2255 return scmutil.status(modified, added, removed, [], [], [], [])
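
# Illustrative sketch, not part of the original module: rewriting only the
# commit metadata (here the user) of an existing changeset 'ctx' while reusing
# its manifest, which is what metadataonlyctx is designed for.
def _metadataonlyctx_sketch(repo, ctx):
    new = metadataonlyctx(repo, ctx,
                          parents=(ctx.p1().node(), ctx.p2().node()),
                          text=ctx.description(),
                          user='new author <new@example.com>',
                          date=ctx.date(),
                          extra=ctx.extra())
    return new.commit()   # commits via repo.commitctx(), reusing ctx's manifest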