annotate: add core algorithm to skip a rev...
Siddharth Agarwal
r32485:05abc47f default
@@ -1,2255 +1,2353 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 )
27 27 from . import (
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 mdiff,
33 33 obsolete as obsmod,
34 34 patch,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 revlog,
39 39 scmutil,
40 40 subrepo,
41 41 util,
42 42 )
43 43
44 44 propertycache = util.propertycache
45 45
46 46 nonascii = re.compile(r'[^\x21-\x7f]').search
47 47
48 48 class basectx(object):
49 49 """A basectx object represents the common logic for its children:
50 50 changectx: read-only context that is already present in the repo,
51 51 workingctx: a context that represents the working directory and can
52 52 be committed,
53 53 memctx: a context that represents changes in-memory and can also
54 54 be committed."""
55 55 def __new__(cls, repo, changeid='', *args, **kwargs):
56 56 if isinstance(changeid, basectx):
57 57 return changeid
58 58
59 59 o = super(basectx, cls).__new__(cls)
60 60
61 61 o._repo = repo
62 62 o._rev = nullrev
63 63 o._node = nullid
64 64
65 65 return o
66 66
67 67 def __str__(self):
68 68 r = short(self.node())
69 69 if pycompat.ispy3:
70 70 return r.decode('ascii')
71 71 return r
72 72
73 73 def __bytes__(self):
74 74 return short(self.node())
75 75
76 76 def __int__(self):
77 77 return self.rev()
78 78
79 79 def __repr__(self):
80 80 return "<%s %s>" % (type(self).__name__, str(self))
81 81
82 82 def __eq__(self, other):
83 83 try:
84 84 return type(self) == type(other) and self._rev == other._rev
85 85 except AttributeError:
86 86 return False
87 87
88 88 def __ne__(self, other):
89 89 return not (self == other)
90 90
91 91 def __contains__(self, key):
92 92 return key in self._manifest
93 93
94 94 def __getitem__(self, key):
95 95 return self.filectx(key)
96 96
97 97 def __iter__(self):
98 98 return iter(self._manifest)
99 99
100 100 def _buildstatusmanifest(self, status):
101 101 """Builds a manifest that includes the given status results, if this is
102 102 a working copy context. For non-working copy contexts, it just returns
103 103 the normal manifest."""
104 104 return self.manifest()
105 105
106 106 def _matchstatus(self, other, match):
107 107 """return match.always if match is none
108 108
109 109 This internal method provides a way for child objects to override the
110 110 match operator.
111 111 """
112 112 return match or matchmod.always(self._repo.root, self._repo.getcwd())
113 113
114 114 def _buildstatus(self, other, s, match, listignored, listclean,
115 115 listunknown):
116 116 """build a status with respect to another context"""
117 117 # Load earliest manifest first for caching reasons. More specifically,
118 118 # if you have revisions 1000 and 1001, 1001 is probably stored as a
119 119 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
120 120 # 1000 and cache it so that when you read 1001, we just need to apply a
121 121 # delta to what's in the cache. So that's one full reconstruction + one
122 122 # delta application.
123 123 mf2 = None
124 124 if self.rev() is not None and self.rev() < other.rev():
125 125 mf2 = self._buildstatusmanifest(s)
126 126 mf1 = other._buildstatusmanifest(s)
127 127 if mf2 is None:
128 128 mf2 = self._buildstatusmanifest(s)
129 129
130 130 modified, added = [], []
131 131 removed = []
132 132 clean = []
133 133 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
134 134 deletedset = set(deleted)
135 135 d = mf1.diff(mf2, match=match, clean=listclean)
136 136 for fn, value in d.iteritems():
137 137 if fn in deletedset:
138 138 continue
139 139 if value is None:
140 140 clean.append(fn)
141 141 continue
142 142 (node1, flag1), (node2, flag2) = value
143 143 if node1 is None:
144 144 added.append(fn)
145 145 elif node2 is None:
146 146 removed.append(fn)
147 147 elif flag1 != flag2:
148 148 modified.append(fn)
149 149 elif node2 not in wdirnodes:
150 150 # When comparing files between two commits, we save time by
151 151 # not comparing the file contents when the nodeids differ.
152 152 # Note that this means we incorrectly report a reverted change
153 153 # to a file as a modification.
154 154 modified.append(fn)
155 155 elif self[fn].cmp(other[fn]):
156 156 modified.append(fn)
157 157 else:
158 158 clean.append(fn)
159 159
160 160 if removed:
161 161 # need to filter files if they are already reported as removed
162 162 unknown = [fn for fn in unknown if fn not in mf1 and
163 163 (not match or match(fn))]
164 164 ignored = [fn for fn in ignored if fn not in mf1 and
165 165 (not match or match(fn))]
166 166 # if they're deleted, don't report them as removed
167 167 removed = [fn for fn in removed if fn not in deletedset]
168 168
169 169 return scmutil.status(modified, added, removed, deleted, unknown,
170 170 ignored, clean)
171 171
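# Illustrative sketch, not part of context.py or this changeset: a minimal,
# self-contained model of the classification rules _buildstatus() applies to
# the manifest diff above. Each entry is sorted into added / removed /
# modified / clean depending on which side has a node and whether the flags
# differ; plain dicts of (node, flag) pairs stand in for revlog-backed
# manifests, and the content comparison done for working-directory nodes is
# left out.

def classify(diff, deletedset=()):
    """diff maps filename -> ((node1, flag1), (node2, flag2)), or None when
    the file is identical on both sides."""
    modified, added, removed, clean = [], [], [], []
    for fn, value in sorted(diff.items()):
        if fn in deletedset:
            continue
        if value is None:            # unchanged between the two manifests
            clean.append(fn)
            continue
        (node1, flag1), (node2, flag2) = value
        if node1 is None:            # only in the newer manifest
            added.append(fn)
        elif node2 is None:          # only in the older manifest
            removed.append(fn)
        elif flag1 != flag2:         # e.g. the exec bit flipped
            modified.append(fn)
        else:                        # present on both sides, nodeids differ
            modified.append(fn)
    return modified, added, removed, clean

# 'a' changed, 'b' was added, 'c' was removed, 'd' is untouched.
_diff = {
    'a': (('n1', ''), ('n2', '')),
    'b': ((None, None), ('n3', '')),
    'c': (('n4', ''), (None, None)),
    'd': None,
}
assert classify(_diff) == (['a'], ['b'], ['c'], ['d'])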
172 172 @propertycache
173 173 def substate(self):
174 174 return subrepo.state(self, self._repo.ui)
175 175
176 176 def subrev(self, subpath):
177 177 return self.substate[subpath][1]
178 178
179 179 def rev(self):
180 180 return self._rev
181 181 def node(self):
182 182 return self._node
183 183 def hex(self):
184 184 return hex(self.node())
185 185 def manifest(self):
186 186 return self._manifest
187 187 def manifestctx(self):
188 188 return self._manifestctx
189 189 def repo(self):
190 190 return self._repo
191 191 def phasestr(self):
192 192 return phases.phasenames[self.phase()]
193 193 def mutable(self):
194 194 return self.phase() > phases.public
195 195
196 196 def getfileset(self, expr):
197 197 return fileset.getfileset(self, expr)
198 198
199 199 def obsolete(self):
200 200 """True if the changeset is obsolete"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
202 202
203 203 def extinct(self):
204 204 """True if the changeset is extinct"""
205 205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
206 206
207 207 def unstable(self):
208 208 """True if the changeset is not obsolete but it's ancestor are"""
209 209 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
210 210
211 211 def bumped(self):
212 212 """True if the changeset try to be a successor of a public changeset
213 213
214 214 Only non-public and non-obsolete changesets may be bumped.
215 215 """
216 216 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
217 217
218 218 def divergent(self):
219 219 """Is a successors of a changeset with multiple possible successors set
220 220
221 221 Only non-public and non-obsolete changesets may be divergent.
222 222 """
223 223 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
224 224
225 225 def troubled(self):
226 226 """True if the changeset is either unstable, bumped or divergent"""
227 227 return self.unstable() or self.bumped() or self.divergent()
228 228
229 229 def troubles(self):
230 230 """return the list of troubles affecting this changesets.
231 231
232 232 Troubles are returned as strings. possible values are:
233 233 - unstable,
234 234 - bumped,
235 235 - divergent.
236 236 """
237 237 troubles = []
238 238 if self.unstable():
239 239 troubles.append('unstable')
240 240 if self.bumped():
241 241 troubles.append('bumped')
242 242 if self.divergent():
243 243 troubles.append('divergent')
244 244 return troubles
245 245
246 246 def parents(self):
247 247 """return contexts for each parent changeset"""
248 248 return self._parents
249 249
250 250 def p1(self):
251 251 return self._parents[0]
252 252
253 253 def p2(self):
254 254 parents = self._parents
255 255 if len(parents) == 2:
256 256 return parents[1]
257 257 return changectx(self._repo, nullrev)
258 258
259 259 def _fileinfo(self, path):
260 260 if r'_manifest' in self.__dict__:
261 261 try:
262 262 return self._manifest[path], self._manifest.flags(path)
263 263 except KeyError:
264 264 raise error.ManifestLookupError(self._node, path,
265 265 _('not found in manifest'))
266 266 if r'_manifestdelta' in self.__dict__ or path in self.files():
267 267 if path in self._manifestdelta:
268 268 return (self._manifestdelta[path],
269 269 self._manifestdelta.flags(path))
270 270 mfl = self._repo.manifestlog
271 271 try:
272 272 node, flag = mfl[self._changeset.manifest].find(path)
273 273 except KeyError:
274 274 raise error.ManifestLookupError(self._node, path,
275 275 _('not found in manifest'))
276 276
277 277 return node, flag
278 278
279 279 def filenode(self, path):
280 280 return self._fileinfo(path)[0]
281 281
282 282 def flags(self, path):
283 283 try:
284 284 return self._fileinfo(path)[1]
285 285 except error.LookupError:
286 286 return ''
287 287
288 288 def sub(self, path, allowcreate=True):
289 289 '''return a subrepo for the stored revision of path, never wdir()'''
290 290 return subrepo.subrepo(self, path, allowcreate=allowcreate)
291 291
292 292 def nullsub(self, path, pctx):
293 293 return subrepo.nullsubrepo(self, path, pctx)
294 294
295 295 def workingsub(self, path):
296 296 '''return a subrepo for the stored revision, or wdir if this is a wdir
297 297 context.
298 298 '''
299 299 return subrepo.subrepo(self, path, allowwdir=True)
300 300
301 301 def match(self, pats=None, include=None, exclude=None, default='glob',
302 302 listsubrepos=False, badfn=None):
303 303 if pats is None:
304 304 pats = []
305 305 r = self._repo
306 306 return matchmod.match(r.root, r.getcwd(), pats,
307 307 include, exclude, default,
308 308 auditor=r.nofsauditor, ctx=self,
309 309 listsubrepos=listsubrepos, badfn=badfn)
310 310
311 311 def diff(self, ctx2=None, match=None, **opts):
312 312 """Returns a diff generator for the given contexts and matcher"""
313 313 if ctx2 is None:
314 314 ctx2 = self.p1()
315 315 if ctx2 is not None:
316 316 ctx2 = self._repo[ctx2]
317 317 diffopts = patch.diffopts(self._repo.ui, opts)
318 318 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
319 319
320 320 def dirs(self):
321 321 return self._manifest.dirs()
322 322
323 323 def hasdir(self, dir):
324 324 return self._manifest.hasdir(dir)
325 325
326 326 def dirty(self, missing=False, merge=True, branch=True):
327 327 return False
328 328
329 329 def status(self, other=None, match=None, listignored=False,
330 330 listclean=False, listunknown=False, listsubrepos=False):
331 331 """return status of files between two nodes or node and working
332 332 directory.
333 333
334 334 If other is None, compare this node with working directory.
335 335
336 336 returns (modified, added, removed, deleted, unknown, ignored, clean)
337 337 """
338 338
339 339 ctx1 = self
340 340 ctx2 = self._repo[other]
341 341
342 342 # This next code block is, admittedly, fragile logic that tests for
343 343 # reversing the contexts and wouldn't need to exist if it weren't for
344 344 # the fast (and common) code path of comparing the working directory
345 345 # with its first parent.
346 346 #
347 347 # What we're aiming for here is the ability to call:
348 348 #
349 349 # workingctx.status(parentctx)
350 350 #
351 351 # If we always built the manifest for each context and compared those,
352 352 # then we'd be done. But the special case of the above call means we
353 353 # just copy the manifest of the parent.
354 354 reversed = False
355 355 if (not isinstance(ctx1, changectx)
356 356 and isinstance(ctx2, changectx)):
357 357 reversed = True
358 358 ctx1, ctx2 = ctx2, ctx1
359 359
360 360 match = ctx2._matchstatus(ctx1, match)
361 361 r = scmutil.status([], [], [], [], [], [], [])
362 362 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
363 363 listunknown)
364 364
365 365 if reversed:
366 366 # Reverse added and removed. Clear deleted, unknown and ignored as
367 367 # these make no sense to reverse.
368 368 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
369 369 r.clean)
370 370
371 371 if listsubrepos:
372 372 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
373 373 try:
374 374 rev2 = ctx2.subrev(subpath)
375 375 except KeyError:
376 376 # A subrepo that existed in node1 was deleted between
377 377 # node1 and node2 (inclusive). Thus, ctx2's substate
378 378 # won't contain that subpath. The best we can do is ignore it.
379 379 rev2 = None
380 380 submatch = matchmod.subdirmatcher(subpath, match)
381 381 s = sub.status(rev2, match=submatch, ignored=listignored,
382 382 clean=listclean, unknown=listunknown,
383 383 listsubrepos=True)
384 384 for rfiles, sfiles in zip(r, s):
385 385 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
386 386
387 387 for l in r:
388 388 l.sort()
389 389
390 390 return r
391 391
392 392
393 393 def makememctx(repo, parents, text, user, date, branch, files, store,
394 394 editor=None, extra=None):
395 395 def getfilectx(repo, memctx, path):
396 396 data, mode, copied = store.getfile(path)
397 397 if data is None:
398 398 return None
399 399 islink, isexec = mode
400 400 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
401 401 copied=copied, memctx=memctx)
402 402 if extra is None:
403 403 extra = {}
404 404 if branch:
405 405 extra['branch'] = encoding.fromlocal(branch)
406 406 ctx = memctx(repo, parents, text, files, getfilectx, user,
407 407 date, extra, editor)
408 408 return ctx
409 409
410 410 def _filterederror(repo, changeid):
411 411 """build an exception to be raised about a filtered changeid
412 412
413 413 This is extracted into a function to help extensions (eg: evolve)
414 414 experiment with various message variants."""
415 415 if repo.filtername.startswith('visible'):
416 416 msg = _("hidden revision '%s'") % changeid
417 417 hint = _('use --hidden to access hidden revisions')
418 418 return error.FilteredRepoLookupError(msg, hint=hint)
419 419 msg = _("filtered revision '%s' (not in '%s' subset)")
420 420 msg %= (changeid, repo.filtername)
421 421 return error.FilteredRepoLookupError(msg)
422 422
423 423 class changectx(basectx):
424 424 """A changecontext object makes access to data related to a particular
425 425 changeset convenient. It represents a read-only context already present in
426 426 the repo."""
427 427 def __init__(self, repo, changeid=''):
428 428 """changeid is a revision number, node, or tag"""
429 429
430 430 # since basectx.__new__ already took care of copying the object, we
431 431 # don't need to do anything in __init__, so we just exit here
432 432 if isinstance(changeid, basectx):
433 433 return
434 434
435 435 if changeid == '':
436 436 changeid = '.'
437 437 self._repo = repo
438 438
439 439 try:
440 440 if isinstance(changeid, int):
441 441 self._node = repo.changelog.node(changeid)
442 442 self._rev = changeid
443 443 return
444 444 if not pycompat.ispy3 and isinstance(changeid, long):
445 445 changeid = str(changeid)
446 446 if changeid == 'null':
447 447 self._node = nullid
448 448 self._rev = nullrev
449 449 return
450 450 if changeid == 'tip':
451 451 self._node = repo.changelog.tip()
452 452 self._rev = repo.changelog.rev(self._node)
453 453 return
454 454 if changeid == '.' or changeid == repo.dirstate.p1():
455 455 # this is a hack to delay/avoid loading obsmarkers
456 456 # when we know that '.' won't be hidden
457 457 self._node = repo.dirstate.p1()
458 458 self._rev = repo.unfiltered().changelog.rev(self._node)
459 459 return
460 460 if len(changeid) == 20:
461 461 try:
462 462 self._node = changeid
463 463 self._rev = repo.changelog.rev(changeid)
464 464 return
465 465 except error.FilteredRepoLookupError:
466 466 raise
467 467 except LookupError:
468 468 pass
469 469
470 470 try:
471 471 r = int(changeid)
472 472 if '%d' % r != changeid:
473 473 raise ValueError
474 474 l = len(repo.changelog)
475 475 if r < 0:
476 476 r += l
477 477 if r < 0 or r >= l:
478 478 raise ValueError
479 479 self._rev = r
480 480 self._node = repo.changelog.node(r)
481 481 return
482 482 except error.FilteredIndexError:
483 483 raise
484 484 except (ValueError, OverflowError, IndexError):
485 485 pass
486 486
487 487 if len(changeid) == 40:
488 488 try:
489 489 self._node = bin(changeid)
490 490 self._rev = repo.changelog.rev(self._node)
491 491 return
492 492 except error.FilteredLookupError:
493 493 raise
494 494 except (TypeError, LookupError):
495 495 pass
496 496
497 497 # lookup bookmarks through the name interface
498 498 try:
499 499 self._node = repo.names.singlenode(repo, changeid)
500 500 self._rev = repo.changelog.rev(self._node)
501 501 return
502 502 except KeyError:
503 503 pass
504 504 except error.FilteredRepoLookupError:
505 505 raise
506 506 except error.RepoLookupError:
507 507 pass
508 508
509 509 self._node = repo.unfiltered().changelog._partialmatch(changeid)
510 510 if self._node is not None:
511 511 self._rev = repo.changelog.rev(self._node)
512 512 return
513 513
514 514 # lookup failed
515 515 # check if it might have come from damaged dirstate
516 516 #
517 517 # XXX we could avoid the unfiltered if we had a recognizable
518 518 # exception for filtered changeset access
519 519 if changeid in repo.unfiltered().dirstate.parents():
520 520 msg = _("working directory has unknown parent '%s'!")
521 521 raise error.Abort(msg % short(changeid))
522 522 try:
523 523 if len(changeid) == 20 and nonascii(changeid):
524 524 changeid = hex(changeid)
525 525 except TypeError:
526 526 pass
527 527 except (error.FilteredIndexError, error.FilteredLookupError,
528 528 error.FilteredRepoLookupError):
529 529 raise _filterederror(repo, changeid)
530 530 except IndexError:
531 531 pass
532 532 raise error.RepoLookupError(
533 533 _("unknown revision '%s'") % changeid)
534 534
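# Illustrative sketch, not part of context.py or this changeset: the integer
# branch of changectx.__init__ above accepts a revision given as a decimal
# string, rejects anything that does not round-trip through '%d' (so '0x10'
# or '007' fall through to the other lookup paths), and lets negative numbers
# count back from the end of the changelog. A standalone model of that
# normalization, with the changelog length passed in explicitly:

def normalizerev(changeid, changeloglen):
    r = int(changeid)                 # may raise ValueError
    if '%d' % r != changeid:
        raise ValueError('not a plain decimal revision: %r' % changeid)
    if r < 0:
        r += changeloglen             # -1 addresses the last revision
    if r < 0 or r >= changeloglen:
        raise ValueError('revision out of range: %r' % changeid)
    return r

assert normalizerev('0', 5) == 0
assert normalizerev('-1', 5) == 4     # last revision
try:
    normalizerev('007', 5)            # does not round-trip through '%d'
except ValueError:
    pass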
535 535 def __hash__(self):
536 536 try:
537 537 return hash(self._rev)
538 538 except AttributeError:
539 539 return id(self)
540 540
541 541 def __nonzero__(self):
542 542 return self._rev != nullrev
543 543
544 544 __bool__ = __nonzero__
545 545
546 546 @propertycache
547 547 def _changeset(self):
548 548 return self._repo.changelog.changelogrevision(self.rev())
549 549
550 550 @propertycache
551 551 def _manifest(self):
552 552 return self._manifestctx.read()
553 553
554 554 @propertycache
555 555 def _manifestctx(self):
556 556 return self._repo.manifestlog[self._changeset.manifest]
557 557
558 558 @propertycache
559 559 def _manifestdelta(self):
560 560 return self._manifestctx.readdelta()
561 561
562 562 @propertycache
563 563 def _parents(self):
564 564 repo = self._repo
565 565 p1, p2 = repo.changelog.parentrevs(self._rev)
566 566 if p2 == nullrev:
567 567 return [changectx(repo, p1)]
568 568 return [changectx(repo, p1), changectx(repo, p2)]
569 569
570 570 def changeset(self):
571 571 c = self._changeset
572 572 return (
573 573 c.manifest,
574 574 c.user,
575 575 c.date,
576 576 c.files,
577 577 c.description,
578 578 c.extra,
579 579 )
580 580 def manifestnode(self):
581 581 return self._changeset.manifest
582 582
583 583 def user(self):
584 584 return self._changeset.user
585 585 def date(self):
586 586 return self._changeset.date
587 587 def files(self):
588 588 return self._changeset.files
589 589 def description(self):
590 590 return self._changeset.description
591 591 def branch(self):
592 592 return encoding.tolocal(self._changeset.extra.get("branch"))
593 593 def closesbranch(self):
594 594 return 'close' in self._changeset.extra
595 595 def extra(self):
596 596 return self._changeset.extra
597 597 def tags(self):
598 598 return self._repo.nodetags(self._node)
599 599 def bookmarks(self):
600 600 return self._repo.nodebookmarks(self._node)
601 601 def phase(self):
602 602 return self._repo._phasecache.phase(self._repo, self._rev)
603 603 def hidden(self):
604 604 return self._rev in repoview.filterrevs(self._repo, 'visible')
605 605
606 606 def children(self):
607 607 """return contexts for each child changeset"""
608 608 c = self._repo.changelog.children(self._node)
609 609 return [changectx(self._repo, x) for x in c]
610 610
611 611 def ancestors(self):
612 612 for a in self._repo.changelog.ancestors([self._rev]):
613 613 yield changectx(self._repo, a)
614 614
615 615 def descendants(self):
616 616 for d in self._repo.changelog.descendants([self._rev]):
617 617 yield changectx(self._repo, d)
618 618
619 619 def filectx(self, path, fileid=None, filelog=None):
620 620 """get a file context from this changeset"""
621 621 if fileid is None:
622 622 fileid = self.filenode(path)
623 623 return filectx(self._repo, path, fileid=fileid,
624 624 changectx=self, filelog=filelog)
625 625
626 626 def ancestor(self, c2, warn=False):
627 627 """return the "best" ancestor context of self and c2
628 628
629 629 If there are multiple candidates, it will show a message and check
630 630 merge.preferancestor configuration before falling back to the
631 631 revlog ancestor."""
632 632 # deal with workingctxs
633 633 n2 = c2._node
634 634 if n2 is None:
635 635 n2 = c2._parents[0]._node
636 636 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
637 637 if not cahs:
638 638 anc = nullid
639 639 elif len(cahs) == 1:
640 640 anc = cahs[0]
641 641 else:
642 642 # experimental config: merge.preferancestor
643 643 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
644 644 try:
645 645 ctx = changectx(self._repo, r)
646 646 except error.RepoLookupError:
647 647 continue
648 648 anc = ctx.node()
649 649 if anc in cahs:
650 650 break
651 651 else:
652 652 anc = self._repo.changelog.ancestor(self._node, n2)
653 653 if warn:
654 654 self._repo.ui.status(
655 655 (_("note: using %s as ancestor of %s and %s\n") %
656 656 (short(anc), short(self._node), short(n2))) +
657 657 ''.join(_(" alternatively, use --config "
658 658 "merge.preferancestor=%s\n") %
659 659 short(n) for n in sorted(cahs) if n != anc))
660 660 return changectx(self._repo, anc)
661 661
662 662 def descendant(self, other):
663 663 """True if other is descendant of this changeset"""
664 664 return self._repo.changelog.descendant(self._rev, other._rev)
665 665
666 666 def walk(self, match):
667 667 '''Generates matching file names.'''
668 668
669 669 # Wrap match.bad method to have message with nodeid
670 670 def bad(fn, msg):
671 671 # The manifest doesn't know about subrepos, so don't complain about
672 672 # paths into valid subrepos.
673 673 if any(fn == s or fn.startswith(s + '/')
674 674 for s in self.substate):
675 675 return
676 676 match.bad(fn, _('no such file in rev %s') % self)
677 677
678 678 m = matchmod.badmatch(match, bad)
679 679 return self._manifest.walk(m)
680 680
681 681 def matches(self, match):
682 682 return self.walk(match)
683 683
684 684 class basefilectx(object):
685 685 """A filecontext object represents the common logic for its children:
686 686 filectx: read-only access to a filerevision that is already present
687 687 in the repo,
688 688 workingfilectx: a filecontext that represents files from the working
689 689 directory,
690 690 memfilectx: a filecontext that represents files in-memory,
691 691 overlayfilectx: duplicate another filecontext with some fields overridden.
692 692 """
693 693 @propertycache
694 694 def _filelog(self):
695 695 return self._repo.file(self._path)
696 696
697 697 @propertycache
698 698 def _changeid(self):
699 699 if r'_changeid' in self.__dict__:
700 700 return self._changeid
701 701 elif r'_changectx' in self.__dict__:
702 702 return self._changectx.rev()
703 703 elif r'_descendantrev' in self.__dict__:
704 704 # this file context was created from a revision with a known
705 705 # descendant; we can (lazily) correct for linkrev aliases
706 706 return self._adjustlinkrev(self._descendantrev)
707 707 else:
708 708 return self._filelog.linkrev(self._filerev)
709 709
710 710 @propertycache
711 711 def _filenode(self):
712 712 if r'_fileid' in self.__dict__:
713 713 return self._filelog.lookup(self._fileid)
714 714 else:
715 715 return self._changectx.filenode(self._path)
716 716
717 717 @propertycache
718 718 def _filerev(self):
719 719 return self._filelog.rev(self._filenode)
720 720
721 721 @propertycache
722 722 def _repopath(self):
723 723 return self._path
724 724
725 725 def __nonzero__(self):
726 726 try:
727 727 self._filenode
728 728 return True
729 729 except error.LookupError:
730 730 # file is missing
731 731 return False
732 732
733 733 __bool__ = __nonzero__
734 734
735 735 def __str__(self):
736 736 try:
737 737 return "%s@%s" % (self.path(), self._changectx)
738 738 except error.LookupError:
739 739 return "%s@???" % self.path()
740 740
741 741 def __repr__(self):
742 742 return "<%s %s>" % (type(self).__name__, str(self))
743 743
744 744 def __hash__(self):
745 745 try:
746 746 return hash((self._path, self._filenode))
747 747 except AttributeError:
748 748 return id(self)
749 749
750 750 def __eq__(self, other):
751 751 try:
752 752 return (type(self) == type(other) and self._path == other._path
753 753 and self._filenode == other._filenode)
754 754 except AttributeError:
755 755 return False
756 756
757 757 def __ne__(self, other):
758 758 return not (self == other)
759 759
760 760 def filerev(self):
761 761 return self._filerev
762 762 def filenode(self):
763 763 return self._filenode
764 764 @propertycache
765 765 def _flags(self):
766 766 return self._changectx.flags(self._path)
767 767 def flags(self):
768 768 return self._flags
769 769 def filelog(self):
770 770 return self._filelog
771 771 def rev(self):
772 772 return self._changeid
773 773 def linkrev(self):
774 774 return self._filelog.linkrev(self._filerev)
775 775 def node(self):
776 776 return self._changectx.node()
777 777 def hex(self):
778 778 return self._changectx.hex()
779 779 def user(self):
780 780 return self._changectx.user()
781 781 def date(self):
782 782 return self._changectx.date()
783 783 def files(self):
784 784 return self._changectx.files()
785 785 def description(self):
786 786 return self._changectx.description()
787 787 def branch(self):
788 788 return self._changectx.branch()
789 789 def extra(self):
790 790 return self._changectx.extra()
791 791 def phase(self):
792 792 return self._changectx.phase()
793 793 def phasestr(self):
794 794 return self._changectx.phasestr()
795 795 def manifest(self):
796 796 return self._changectx.manifest()
797 797 def changectx(self):
798 798 return self._changectx
799 799 def renamed(self):
800 800 return self._copied
801 801 def repo(self):
802 802 return self._repo
803 803 def size(self):
804 804 return len(self.data())
805 805
806 806 def path(self):
807 807 return self._path
808 808
809 809 def isbinary(self):
810 810 try:
811 811 return util.binary(self.data())
812 812 except IOError:
813 813 return False
814 814 def isexec(self):
815 815 return 'x' in self.flags()
816 816 def islink(self):
817 817 return 'l' in self.flags()
818 818
819 819 def isabsent(self):
820 820 """whether this filectx represents a file not in self._changectx
821 821
822 822 This is mainly for merge code to detect change/delete conflicts. This is
823 823 expected to be True for all subclasses of basectx."""
824 824 return False
825 825
826 826 _customcmp = False
827 827 def cmp(self, fctx):
828 828 """compare with other file context
829 829
830 830 returns True if different than fctx.
831 831 """
832 832 if fctx._customcmp:
833 833 return fctx.cmp(self)
834 834
835 835 if (fctx._filenode is None
836 836 and (self._repo._encodefilterpats
837 837 # if file data starts with '\1\n', empty metadata block is
838 838 # prepended, which adds 4 bytes to filelog.size().
839 839 or self.size() - 4 == fctx.size())
840 840 or self.size() == fctx.size()):
841 841 return self._filelog.cmp(self._filenode, fctx.data())
842 842
843 843 return True
844 844
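# Illustrative sketch, not part of context.py or this changeset: why cmp()
# above tolerates a 4-byte size difference. When file data itself starts
# with the '\1\n' marker, the filelog stores it with an empty metadata block
# ('\1\n\1\n') prepended so it cannot be mistaken for real copy metadata;
# that block is exactly 4 bytes, so filelog.size() exceeds the working-copy
# size by 4 in that case. A toy model of the size effect only:

def storedsize(data):
    if data.startswith(b'\x01\n'):
        return len(b'\x01\n\x01\n' + data)
    return len(data)

plain = b'hello\n'
tricky = b'\x01\nlooks like metadata\n'
assert storedsize(plain) == len(plain)
assert storedsize(tricky) == len(tricky) + 4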
845 845 def _adjustlinkrev(self, srcrev, inclusive=False):
846 846 """return the first ancestor of <srcrev> introducing <fnode>
847 847
848 848 If the linkrev of the file revision does not point to an ancestor of
849 849 srcrev, we'll walk down the ancestors until we find one introducing
850 850 this file revision.
851 851
852 852 :srcrev: the changeset revision we search ancestors from
853 853 :inclusive: if true, the src revision will also be checked
854 854 """
855 855 repo = self._repo
856 856 cl = repo.unfiltered().changelog
857 857 mfl = repo.manifestlog
858 858 # fetch the linkrev
859 859 lkr = self.linkrev()
860 860 # hack to reuse ancestor computation when searching for renames
861 861 memberanc = getattr(self, '_ancestrycontext', None)
862 862 iteranc = None
863 863 if srcrev is None:
864 864 # wctx case, used by workingfilectx during mergecopy
865 865 revs = [p.rev() for p in self._repo[None].parents()]
866 866 inclusive = True # we skipped the real (revless) source
867 867 else:
868 868 revs = [srcrev]
869 869 if memberanc is None:
870 870 memberanc = iteranc = cl.ancestors(revs, lkr,
871 871 inclusive=inclusive)
872 872 # check if this linkrev is an ancestor of srcrev
873 873 if lkr not in memberanc:
874 874 if iteranc is None:
875 875 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
876 876 fnode = self._filenode
877 877 path = self._path
878 878 for a in iteranc:
879 879 ac = cl.read(a) # get changeset data (we avoid object creation)
880 880 if path in ac[3]: # checking the 'files' field.
881 881 # The file has been touched, check if the content is
882 882 # similar to the one we search for.
883 883 if fnode == mfl[ac[0]].readfast().get(path):
884 884 return a
885 885 # In theory, we should never get out of that loop without a result.
886 886 # But if the manifest uses a buggy file revision (not a child of the
887 887 # one it replaces) we could. Such a buggy situation will likely
888 888 # result in a crash somewhere else at some point.
889 889 return lkr
890 890
891 891 def introrev(self):
892 892 """return the rev of the changeset which introduced this file revision
893 893
894 894 This method is different from linkrev because it takes into account the
895 895 changeset the filectx was created from. It ensures the returned
896 896 revision is one of its ancestors. This prevents bugs from
897 897 'linkrev-shadowing' when a file revision is used by multiple
898 898 changesets.
899 899 """
900 900 lkr = self.linkrev()
901 901 attrs = vars(self)
902 902 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
903 903 if noctx or self.rev() == lkr:
904 904 return self.linkrev()
905 905 return self._adjustlinkrev(self.rev(), inclusive=True)
906 906
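# Illustrative sketch, not part of context.py or this changeset: the idea
# behind _adjustlinkrev() and introrev() above, on a toy changelog. A filelog
# revision records a single linkrev (the first changeset that introduced it),
# but the same file node can be introduced again on another branch; when the
# recorded linkrev is not an ancestor of the changeset we started from, we
# walk that changeset's ancestors looking for one that touched the path with
# the same node. The dict-based "changelog" and all names are invented for
# the example, and the traversal order is simplified.

TOYLOG = {
    # rev: (parent revs, {path: filenode introduced here})
    0: ((), {'f': 'n0'}),
    1: ((0,), {'f': 'n1'}),   # the stored linkrev of 'n1' points here...
    2: ((0,), {'f': 'n1'}),   # ...but rev 2, on another branch, has it too
    3: ((2,), {}),
}

def toyancestors(rev):
    seen, stack = set(), [rev]
    while stack:
        r = stack.pop()
        if r not in seen:
            seen.add(r)
            stack.extend(TOYLOG[r][0])
    return seen

def toyadjustlinkrev(srcrev, path, fnode, lkr):
    if lkr in toyancestors(srcrev):
        return lkr                       # the cheap, common case
    for a in sorted(toyancestors(srcrev), reverse=True):
        if TOYLOG[a][1].get(path) == fnode:
            return a                     # first ancestor introducing fnode
    return lkr                           # buggy history: same fallback as above

# Seen from rev 3, the introduction of 'n1' is rev 2, not the stored
# linkrev 1, which sits on an unrelated branch.
assert toyadjustlinkrev(3, 'f', 'n1', 1) == 2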
907 907 def _parentfilectx(self, path, fileid, filelog):
908 908 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
909 909 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
910 910 if '_changeid' in vars(self) or '_changectx' in vars(self):
911 911 # If self is associated with a changeset (probably explicitly
912 912 # fed), ensure the created filectx is associated with a
913 913 # changeset that is an ancestor of self.changectx.
914 914 # This lets us later use _adjustlinkrev to get a correct link.
915 915 fctx._descendantrev = self.rev()
916 916 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
917 917 elif '_descendantrev' in vars(self):
918 918 # Otherwise propagate _descendantrev if we have one associated.
919 919 fctx._descendantrev = self._descendantrev
920 920 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
921 921 return fctx
922 922
923 923 def parents(self):
924 924 _path = self._path
925 925 fl = self._filelog
926 926 parents = self._filelog.parents(self._filenode)
927 927 pl = [(_path, node, fl) for node in parents if node != nullid]
928 928
929 929 r = fl.renamed(self._filenode)
930 930 if r:
931 931 # - In the simple rename case, both parents are nullid and pl is empty.
932 932 # - In case of merge, only one of the parents is nullid and should
933 933 # be replaced with the rename information. This parent is -always-
934 934 # the first one.
935 935 #
936 936 # As nullid parents have always been filtered out in the previous list
937 937 # comprehension, inserting at 0 always results in replacing the
938 938 # first nullid parent with the rename information.
939 939 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
940 940
941 941 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
942 942
943 943 def p1(self):
944 944 return self.parents()[0]
945 945
946 946 def p2(self):
947 947 p = self.parents()
948 948 if len(p) == 2:
949 949 return p[1]
950 950 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
951 951
952 952 def annotate(self, follow=False, linenumber=False, diffopts=None):
953 953 '''returns a list of tuples of ((ctx, number), line) for each line
954 954 in the file, where ctx is the filectx of the node where
955 955 that line was last changed; if the linenumber parameter is True, number is
956 956 the line number at the first appearance in the managed file, otherwise,
957 957 number has a fixed value of False.
958 958 '''
959 959
960 960 def lines(text):
961 961 if text.endswith("\n"):
962 962 return text.count("\n")
963 963 return text.count("\n") + int(bool(text))
964 964
965 965 if linenumber:
966 966 def decorate(text, rev):
967 967 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
968 968 else:
969 969 def decorate(text, rev):
970 970 return ([(rev, False)] * lines(text), text)
971 971
972 972 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
973 973
974 974 def parents(f):
975 975 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
976 976 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
977 977 # from the topmost introrev (= srcrev) down to p.linkrev() if it
978 978 # isn't an ancestor of the srcrev.
979 979 f._changeid
980 980 pl = f.parents()
981 981
982 982 # Don't return renamed parents if we aren't following.
983 983 if not follow:
984 984 pl = [p for p in pl if p.path() == f.path()]
985 985
986 986 # renamed filectx won't have a filelog yet, so set it
987 987 # from the cache to save time
988 988 for p in pl:
989 989 if not '_filelog' in p.__dict__:
990 990 p._filelog = getlog(p.path())
991 991
992 992 return pl
993 993
994 994 # use linkrev to find the first changeset where self appeared
995 995 base = self
996 996 introrev = self.introrev()
997 997 if self.rev() != introrev:
998 998 base = self.filectx(self.filenode(), changeid=introrev)
999 999 if getattr(base, '_ancestrycontext', None) is None:
1000 1000 cl = self._repo.changelog
1001 1001 if introrev is None:
1002 1002 # wctx is not inclusive, but works because _ancestrycontext
1003 1003 # is used to test filelog revisions
1004 1004 ac = cl.ancestors([p.rev() for p in base.parents()],
1005 1005 inclusive=True)
1006 1006 else:
1007 1007 ac = cl.ancestors([introrev], inclusive=True)
1008 1008 base._ancestrycontext = ac
1009 1009
1010 1010 # This algorithm would prefer to be recursive, but Python is a
1011 1011 # bit recursion-hostile. Instead we do an iterative
1012 1012 # depth-first search.
1013 1013
1014 1014 # 1st DFS pre-calculates pcache and needed
1015 1015 visit = [base]
1016 1016 pcache = {}
1017 1017 needed = {base: 1}
1018 1018 while visit:
1019 1019 f = visit.pop()
1020 1020 if f in pcache:
1021 1021 continue
1022 1022 pl = parents(f)
1023 1023 pcache[f] = pl
1024 1024 for p in pl:
1025 1025 needed[p] = needed.get(p, 0) + 1
1026 1026 if p not in pcache:
1027 1027 visit.append(p)
1028 1028
1029 1029 # 2nd DFS does the actual annotate
1030 1030 visit[:] = [base]
1031 1031 hist = {}
1032 1032 while visit:
1033 1033 f = visit[-1]
1034 1034 if f in hist:
1035 1035 visit.pop()
1036 1036 continue
1037 1037
1038 1038 ready = True
1039 1039 pl = pcache[f]
1040 1040 for p in pl:
1041 1041 if p not in hist:
1042 1042 ready = False
1043 1043 visit.append(p)
1044 1044 if ready:
1045 1045 visit.pop()
1046 1046 curr = decorate(f.data(), f)
1047 curr = _annotatepair([hist[p] for p in pl], curr, diffopts)
1047 curr = _annotatepair([hist[p] for p in pl], f, curr, False,
1048 diffopts)
1048 1049 for p in pl:
1049 1050 if needed[p] == 1:
1050 1051 del hist[p]
1051 1052 del needed[p]
1052 1053 else:
1053 1054 needed[p] -= 1
1054 1055
1055 1056 hist[f] = curr
1056 1057 del pcache[f]
1057 1058
1058 1059 return zip(hist[base][0], hist[base][1].splitlines(True))
1059 1060
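# Illustrative sketch, not part of context.py or this changeset: the control
# flow of annotate() above reduced to its skeleton. An iterative depth-first
# walk first records each node's parents and a reference count ('needed'),
# then computes results bottom-up, dropping a parent's cached result as soon
# as every child that needs it has been processed. The toy PARENTS / VALUE
# mappings and the summing combine() are invented stand-ins for
# fctx.parents() and _annotatepair().

PARENTS = {'base': ['p1', 'p2'], 'p1': ['root'], 'p2': ['root'], 'root': []}
VALUE = {'base': 1, 'p1': 10, 'p2': 100, 'root': 1000}

def combine(parentresults, own):
    return own + sum(parentresults)

def compute(base):
    # 1st pass: pre-calculate pcache and needed
    visit, pcache, needed = [base], {}, {base: 1}
    while visit:
        f = visit.pop()
        if f in pcache:
            continue
        pl = PARENTS[f]
        pcache[f] = pl
        for p in pl:
            needed[p] = needed.get(p, 0) + 1
            if p not in pcache:
                visit.append(p)

    # 2nd pass: compute a node only once all of its parents are available
    visit[:] = [base]
    hist = {}
    while visit:
        f = visit[-1]
        if f in hist:
            visit.pop()
            continue
        ready = True
        for p in pcache[f]:
            if p not in hist:
                ready = False
                visit.append(p)
        if ready:
            visit.pop()
            curr = combine([hist[p] for p in pcache[f]], VALUE[f])
            for p in pcache[f]:
                needed[p] -= 1
                if not needed[p]:       # last consumer: free the parent
                    del hist[p]
                    del needed[p]
            hist[f] = curr
            del pcache[f]
    return hist[base]

# root's value flows down both sides of the merge, exactly like annotate
# data reaching a merge through both parents.
assert compute('base') == 1 + 10 + 100 + 1000 + 1000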
1060 1061 def ancestors(self, followfirst=False):
1061 1062 visit = {}
1062 1063 c = self
1063 1064 if followfirst:
1064 1065 cut = 1
1065 1066 else:
1066 1067 cut = None
1067 1068
1068 1069 while True:
1069 1070 for parent in c.parents()[:cut]:
1070 1071 visit[(parent.linkrev(), parent.filenode())] = parent
1071 1072 if not visit:
1072 1073 break
1073 1074 c = visit.pop(max(visit))
1074 1075 yield c
1075 1076
1076 def _annotatepair(parents, child, diffopts):
1077 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1078 r'''
1079 Given parent and child fctxes and annotate data for parents, for all lines
1080 in either parent that match the child, annotate the child with the parent's
1081 data.
1082
1083 Additionally, if `skipchild` is True, replace all other lines with parent
1084 annotate data as well such that child is never blamed for any lines.
1085
1086 >>> oldfctx = 'old'
1087 >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
1088 >>> olddata = 'a\nb\n'
1089 >>> p1data = 'a\nb\nc\n'
1090 >>> p2data = 'a\nc\nd\n'
1091 >>> childdata = 'a\nb2\nc\nc2\nd\n'
1092 >>> diffopts = mdiff.diffopts()
1093
1094 >>> def decorate(text, rev):
1095 ... return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)
1096
1097 Basic usage:
1098
1099 >>> oldann = decorate(olddata, oldfctx)
1100 >>> p1ann = decorate(p1data, p1fctx)
1101 >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
1102 >>> p1ann[0]
1103 [('old', 1), ('old', 2), ('p1', 3)]
1104 >>> p2ann = decorate(p2data, p2fctx)
1105 >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
1106 >>> p2ann[0]
1107 [('old', 1), ('p2', 2), ('p2', 3)]
1108
1109 Test with multiple parents (note the difference caused by ordering):
1110
1111 >>> childann = decorate(childdata, childfctx)
1112 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
1113 ... diffopts)
1114 >>> childann[0]
1115 [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]
1116
1117 >>> childann = decorate(childdata, childfctx)
1118 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
1119 ... diffopts)
1120 >>> childann[0]
1121 [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]
1122
1123 Test with skipchild (note the difference caused by ordering):
1124
1125 >>> childann = decorate(childdata, childfctx)
1126 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
1127 ... diffopts)
1128 >>> childann[0]
1129 [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]
1130
1131 >>> childann = decorate(childdata, childfctx)
1132 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
1133 ... diffopts)
1134 >>> childann[0]
1135 [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
1136 '''
1077 1137 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1078 1138 for parent in parents]
1139
1140 if skipchild:
1141 # Need to iterate over the blocks twice -- make it a list
1142 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1079 1143 # Mercurial currently prefers p2 over p1 for annotate.
1080 1144 # TODO: change this?
1081 1145 for parent, blocks in pblocks:
1082 1146 for (a1, a2, b1, b2), t in blocks:
1083 1147 # Changed blocks ('!') or blocks made only of blank lines ('~')
1084 1148 # belong to the child.
1085 1149 if t == '=':
1086 1150 child[0][b1:b2] = parent[0][a1:a2]
1151
1152 if skipchild:
1153 # Now try to match up anything that couldn't be matched.
1154 # Reversing pblocks maintains the bias towards p2, matching the
1155 # behavior above.
1156 pblocks.reverse()
1157
1158 # The heuristics are:
1159 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1160 # This could potentially be smarter but works well enough.
1161 # * For a non-matching section, do a best-effort fit. Match lines in
1162 # diff hunks 1:1, dropping lines as necessary.
1163 # * Repeat the last line as a last resort.
1164
1165 # First, replace as much as possible without repeating the last line.
1166 remaining = [(parent, []) for parent, _blocks in pblocks]
1167 for idx, (parent, blocks) in enumerate(pblocks):
1168 for (a1, a2, b1, b2), _t in blocks:
1169 if a2 - a1 >= b2 - b1:
1170 for bk in xrange(b1, b2):
1171 if child[0][bk][0] == childfctx:
1172 ak = min(a1 + (bk - b1), a2 - 1)
1173 child[0][bk] = parent[0][ak]
1174 else:
1175 remaining[idx][1].append((a1, a2, b1, b2))
1176
1177 # Then, look at anything left, which might involve repeating the last
1178 # line.
1179 for parent, blocks in remaining:
1180 for a1, a2, b1, b2 in blocks:
1181 for bk in xrange(b1, b2):
1182 if child[0][bk][0] == childfctx:
1183 ak = min(a1 + (bk - b1), a2 - 1)
1184 child[0][bk] = parent[0][ak]
1087 1185 return child
1088 1186
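# Illustrative sketch, not part of context.py or this changeset: the
# line-mapping heuristic that the skipchild branch of _annotatepair() applies
# to a single changed hunk (a1, a2, b1, b2), in isolation. Every child line
# still blamed on the skipped revision is reassigned to a parent line,
# matched 1:1 and falling back to repeating the parent hunk's last line when
# the child side is longer. Annotations are modelled as plain
# (rev, lineno) pairs; the function and variable names are invented.

def skiphunk(childann, parentann, hunk, skippedrev):
    a1, a2, b1, b2 = hunk
    for bk in range(b1, b2):
        if childann[bk][0] == skippedrev:
            ak = min(a1 + (bk - b1), a2 - 1)   # clamp: repeat the last line
            childann[bk] = parentann[ak]
    return childann

parentann = [('p', 1), ('p', 2)]                # 2 parent lines in the hunk
childann = [('c', 1), ('c', 2), ('c', 3)]       # 3 child lines, blamed on 'c'
skiphunk(childann, parentann, (0, 2, 0, 3), 'c')
assert childann == [('p', 1), ('p', 2), ('p', 2)]   # last parent line repeated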
1089 1187 class filectx(basefilectx):
1090 1188 """A filecontext object makes access to data related to a particular
1091 1189 filerevision convenient."""
1092 1190 def __init__(self, repo, path, changeid=None, fileid=None,
1093 1191 filelog=None, changectx=None):
1094 1192 """changeid can be a changeset revision, node, or tag.
1095 1193 fileid can be a file revision or node."""
1096 1194 self._repo = repo
1097 1195 self._path = path
1098 1196
1099 1197 assert (changeid is not None
1100 1198 or fileid is not None
1101 1199 or changectx is not None), \
1102 1200 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1103 1201 % (changeid, fileid, changectx))
1104 1202
1105 1203 if filelog is not None:
1106 1204 self._filelog = filelog
1107 1205
1108 1206 if changeid is not None:
1109 1207 self._changeid = changeid
1110 1208 if changectx is not None:
1111 1209 self._changectx = changectx
1112 1210 if fileid is not None:
1113 1211 self._fileid = fileid
1114 1212
1115 1213 @propertycache
1116 1214 def _changectx(self):
1117 1215 try:
1118 1216 return changectx(self._repo, self._changeid)
1119 1217 except error.FilteredRepoLookupError:
1120 1218 # Linkrev may point to any revision in the repository. When the
1121 1219 # repository is filtered this may lead to `filectx` trying to build
1122 1220 # `changectx` for a filtered revision. In such a case we fall back to
1123 1221 # creating `changectx` on the unfiltered version of the repository.
1124 1222 # This fallback should not be an issue because `changectx` from
1125 1223 # `filectx` are not used in complex operations that care about
1126 1224 # filtering.
1127 1225 #
1128 1226 # This fallback is a cheap and dirty fix that prevents several
1129 1227 # crashes. It does not ensure the behavior is correct. However the
1130 1228 # behavior was not correct before filtering either, and "incorrect
1131 1229 # behavior" is seen as better than "crash".
1132 1230 #
1133 1231 # Linkrevs have several serious problems with filtering that are
1134 1232 # complicated to solve. Proper handling of the issue here should be
1135 1233 # considered when solutions to the linkrev issue are on the table.
1136 1234 return changectx(self._repo.unfiltered(), self._changeid)
1137 1235
1138 1236 def filectx(self, fileid, changeid=None):
1139 1237 '''opens an arbitrary revision of the file without
1140 1238 opening a new filelog'''
1141 1239 return filectx(self._repo, self._path, fileid=fileid,
1142 1240 filelog=self._filelog, changeid=changeid)
1143 1241
1144 1242 def rawdata(self):
1145 1243 return self._filelog.revision(self._filenode, raw=True)
1146 1244
1147 1245 def rawflags(self):
1148 1246 """low-level revlog flags"""
1149 1247 return self._filelog.flags(self._filerev)
1150 1248
1151 1249 def data(self):
1152 1250 try:
1153 1251 return self._filelog.read(self._filenode)
1154 1252 except error.CensoredNodeError:
1155 1253 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1156 1254 return ""
1157 1255 raise error.Abort(_("censored node: %s") % short(self._filenode),
1158 1256 hint=_("set censor.policy to ignore errors"))
1159 1257
1160 1258 def size(self):
1161 1259 return self._filelog.size(self._filerev)
1162 1260
1163 1261 @propertycache
1164 1262 def _copied(self):
1165 1263 """check if file was actually renamed in this changeset revision
1166 1264
1167 1265 If a rename is logged in the file revision, we report the copy for the
1168 1266 changeset only if the file revision's linkrev points back to the changeset
1169 1267 in question or both changeset parents contain different file revisions.
1170 1268 """
1171 1269
1172 1270 renamed = self._filelog.renamed(self._filenode)
1173 1271 if not renamed:
1174 1272 return renamed
1175 1273
1176 1274 if self.rev() == self.linkrev():
1177 1275 return renamed
1178 1276
1179 1277 name = self.path()
1180 1278 fnode = self._filenode
1181 1279 for p in self._changectx.parents():
1182 1280 try:
1183 1281 if fnode == p.filenode(name):
1184 1282 return None
1185 1283 except error.LookupError:
1186 1284 pass
1187 1285 return renamed
1188 1286
1189 1287 def children(self):
1190 1288 # hard for renames
1191 1289 c = self._filelog.children(self._filenode)
1192 1290 return [filectx(self._repo, self._path, fileid=x,
1193 1291 filelog=self._filelog) for x in c]
1194 1292
1195 1293 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1196 1294 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1197 1295 if diff from fctx2 to fctx1 has changes in linerange2 and
1198 1296 `linerange1` is the new line range for fctx1.
1199 1297 """
1200 1298 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1201 1299 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1202 1300 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1203 1301 return diffinrange, linerange1
1204 1302
1205 1303 def blockancestors(fctx, fromline, toline, followfirst=False):
1206 1304 """Yield ancestors of `fctx` with respect to the block of lines within
1207 1305 `fromline`-`toline` range.
1208 1306 """
1209 1307 diffopts = patch.diffopts(fctx._repo.ui)
1210 1308 introrev = fctx.introrev()
1211 1309 if fctx.rev() != introrev:
1212 1310 fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
1213 1311 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1214 1312 while visit:
1215 1313 c, linerange2 = visit.pop(max(visit))
1216 1314 pl = c.parents()
1217 1315 if followfirst:
1218 1316 pl = pl[:1]
1219 1317 if not pl:
1220 1318 # The block originates from the initial revision.
1221 1319 yield c, linerange2
1222 1320 continue
1223 1321 inrange = False
1224 1322 for p in pl:
1225 1323 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1226 1324 inrange = inrange or inrangep
1227 1325 if linerange1[0] == linerange1[1]:
1228 1326 # Parent's linerange is empty, meaning that the block got
1229 1327 # introduced in this revision; no need to go further in this
1230 1328 # branch.
1231 1329 continue
1232 1330 # Set _descendantrev with 'c' (a known descendant) so that, when
1233 1331 # _adjustlinkrev is called for 'p', it receives this descendant
1234 1332 # (as srcrev) instead of the possibly topmost introrev.
1235 1333 p._descendantrev = c.rev()
1236 1334 visit[p.linkrev(), p.filenode()] = p, linerange1
1237 1335 if inrange:
1238 1336 yield c, linerange2
1239 1337
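# Illustrative sketch, not part of context.py or this changeset: one way
# blockancestors() above can be driven. This is the machinery behind the
# experimental followlines() revset; the helper below is a hypothetical
# wrapper that yields (revision, line range) pairs for every ancestor
# changeset touching lines fromline-toline of `path`, as seen from the
# working directory's parent.

def touchedrevs(repo, path, fromline, toline):
    fctx = repo['.'][path]
    for c, linerange in blockancestors(fctx, fromline, toline):
        yield c.rev(), linerange

# Example use (requires an actual repository object):
#   for rev, (lo, hi) in touchedrevs(repo, 'mercurial/context.py', 10, 20):
#       print(rev, lo, hi)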
1240 1338 def blockdescendants(fctx, fromline, toline):
1241 1339 """Yield descendants of `fctx` with respect to the block of lines within
1242 1340 `fromline`-`toline` range.
1243 1341 """
1244 1342 # First possibly yield 'fctx' if it has changes in range with respect to
1245 1343 # its parents.
1246 1344 try:
1247 1345 c, linerange1 = next(blockancestors(fctx, fromline, toline))
1248 1346 except StopIteration:
1249 1347 pass
1250 1348 else:
1251 1349 if c == fctx:
1252 1350 yield c, linerange1
1253 1351
1254 1352 diffopts = patch.diffopts(fctx._repo.ui)
1255 1353 fl = fctx.filelog()
1256 1354 seen = {fctx.filerev(): (fctx, (fromline, toline))}
1257 1355 for i in fl.descendants([fctx.filerev()]):
1258 1356 c = fctx.filectx(i)
1259 1357 inrange = False
1260 1358 for x in fl.parentrevs(i):
1261 1359 try:
1262 1360 p, linerange2 = seen[x]
1263 1361 except KeyError:
1264 1362 # nullrev or other branch
1265 1363 continue
1266 1364 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
1267 1365 inrange = inrange or inrangep
1268 1366 # If revision 'i' has been seen (it's a merge), we assume that its
1269 1367 # line range is the same independently of which parent was used
1270 1368 # to compute it.
1271 1369 assert i not in seen or seen[i][1] == linerange1, (
1272 1370 'computed line range for %s is not consistent between '
1273 1371 'ancestor branches' % c)
1274 1372 seen[i] = c, linerange1
1275 1373 if inrange:
1276 1374 yield c, linerange1
1277 1375
1278 1376 class committablectx(basectx):
1279 1377 """A committablectx object provides common functionality for a context that
1280 1378 wants the ability to commit, e.g. workingctx or memctx."""
1281 1379 def __init__(self, repo, text="", user=None, date=None, extra=None,
1282 1380 changes=None):
1283 1381 self._repo = repo
1284 1382 self._rev = None
1285 1383 self._node = None
1286 1384 self._text = text
1287 1385 if date:
1288 1386 self._date = util.parsedate(date)
1289 1387 if user:
1290 1388 self._user = user
1291 1389 if changes:
1292 1390 self._status = changes
1293 1391
1294 1392 self._extra = {}
1295 1393 if extra:
1296 1394 self._extra = extra.copy()
1297 1395 if 'branch' not in self._extra:
1298 1396 try:
1299 1397 branch = encoding.fromlocal(self._repo.dirstate.branch())
1300 1398 except UnicodeDecodeError:
1301 1399 raise error.Abort(_('branch name not in UTF-8!'))
1302 1400 self._extra['branch'] = branch
1303 1401 if self._extra['branch'] == '':
1304 1402 self._extra['branch'] = 'default'
1305 1403
1306 1404 def __str__(self):
1307 1405 return str(self._parents[0]) + "+"
1308 1406
1309 1407 def __nonzero__(self):
1310 1408 return True
1311 1409
1312 1410 __bool__ = __nonzero__
1313 1411
1314 1412 def _buildflagfunc(self):
1315 1413 # Create a fallback function for getting file flags when the
1316 1414 # filesystem doesn't support them
1317 1415
1318 1416 copiesget = self._repo.dirstate.copies().get
1319 1417 parents = self.parents()
1320 1418 if len(parents) < 2:
1321 1419 # when we have one parent, it's easy: copy from parent
1322 1420 man = parents[0].manifest()
1323 1421 def func(f):
1324 1422 f = copiesget(f, f)
1325 1423 return man.flags(f)
1326 1424 else:
1327 1425 # merges are tricky: we try to reconstruct the unstored
1328 1426 # result from the merge (issue1802)
1329 1427 p1, p2 = parents
1330 1428 pa = p1.ancestor(p2)
1331 1429 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1332 1430
1333 1431 def func(f):
1334 1432 f = copiesget(f, f) # may be wrong for merges with copies
1335 1433 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1336 1434 if fl1 == fl2:
1337 1435 return fl1
1338 1436 if fl1 == fla:
1339 1437 return fl2
1340 1438 if fl2 == fla:
1341 1439 return fl1
1342 1440 return '' # punt for conflicts
1343 1441
1344 1442 return func
1345 1443
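# Illustrative sketch, not part of context.py or this changeset: the
# three-way flag resolution used by _buildflagfunc() above for an
# uncommitted merge (issue1802), as a standalone function. Keep whichever
# side changed the flag relative to the ancestor, and punt with '' when the
# two sides changed it in conflicting ways.

def mergeflags(fl1, fl2, fla):
    if fl1 == fl2:
        return fl1          # both parents agree
    if fl1 == fla:
        return fl2          # only p2 changed the flag
    if fl2 == fla:
        return fl1          # only p1 changed the flag
    return ''               # conflicting flag changes: punt

assert mergeflags('x', 'x', '') == 'x'   # both parents made it executable
assert mergeflags('', 'x', '') == 'x'    # only p2 set the exec bit
assert mergeflags('l', 'x', '') == ''    # conflict: symlink vs. exec bit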
1346 1444 @propertycache
1347 1445 def _flagfunc(self):
1348 1446 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1349 1447
1350 1448 @propertycache
1351 1449 def _status(self):
1352 1450 return self._repo.status()
1353 1451
1354 1452 @propertycache
1355 1453 def _user(self):
1356 1454 return self._repo.ui.username()
1357 1455
1358 1456 @propertycache
1359 1457 def _date(self):
1360 1458 ui = self._repo.ui
1361 1459 date = ui.configdate('devel', 'default-date')
1362 1460 if date is None:
1363 1461 date = util.makedate()
1364 1462 return date
1365 1463
1366 1464 def subrev(self, subpath):
1367 1465 return None
1368 1466
1369 1467 def manifestnode(self):
1370 1468 return None
1371 1469 def user(self):
1372 1470 return self._user or self._repo.ui.username()
1373 1471 def date(self):
1374 1472 return self._date
1375 1473 def description(self):
1376 1474 return self._text
1377 1475 def files(self):
1378 1476 return sorted(self._status.modified + self._status.added +
1379 1477 self._status.removed)
1380 1478
1381 1479 def modified(self):
1382 1480 return self._status.modified
1383 1481 def added(self):
1384 1482 return self._status.added
1385 1483 def removed(self):
1386 1484 return self._status.removed
1387 1485 def deleted(self):
1388 1486 return self._status.deleted
1389 1487 def branch(self):
1390 1488 return encoding.tolocal(self._extra['branch'])
1391 1489 def closesbranch(self):
1392 1490 return 'close' in self._extra
1393 1491 def extra(self):
1394 1492 return self._extra
1395 1493
1396 1494 def tags(self):
1397 1495 return []
1398 1496
1399 1497 def bookmarks(self):
1400 1498 b = []
1401 1499 for p in self.parents():
1402 1500 b.extend(p.bookmarks())
1403 1501 return b
1404 1502
1405 1503 def phase(self):
1406 1504 phase = phases.draft # default phase to draft
1407 1505 for p in self.parents():
1408 1506 phase = max(phase, p.phase())
1409 1507 return phase
1410 1508
1411 1509 def hidden(self):
1412 1510 return False
1413 1511
1414 1512 def children(self):
1415 1513 return []
1416 1514
1417 1515 def flags(self, path):
1418 1516 if r'_manifest' in self.__dict__:
1419 1517 try:
1420 1518 return self._manifest.flags(path)
1421 1519 except KeyError:
1422 1520 return ''
1423 1521
1424 1522 try:
1425 1523 return self._flagfunc(path)
1426 1524 except OSError:
1427 1525 return ''
1428 1526
1429 1527 def ancestor(self, c2):
1430 1528 """return the "best" ancestor context of self and c2"""
1431 1529 return self._parents[0].ancestor(c2) # punt on two parents for now
1432 1530
1433 1531 def walk(self, match):
1434 1532 '''Generates matching file names.'''
1435 1533 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1436 1534 True, False))
1437 1535
1438 1536 def matches(self, match):
1439 1537 return sorted(self._repo.dirstate.matches(match))
1440 1538
1441 1539 def ancestors(self):
1442 1540 for p in self._parents:
1443 1541 yield p
1444 1542 for a in self._repo.changelog.ancestors(
1445 1543 [p.rev() for p in self._parents]):
1446 1544 yield changectx(self._repo, a)
1447 1545
1448 1546 def markcommitted(self, node):
1449 1547 """Perform post-commit cleanup necessary after committing this ctx
1450 1548
1451 1549 Specifically, this updates the backing stores this working context
1452 1550 wraps to reflect the fact that the changes reflected by this
1453 1551 workingctx have been committed. For example, it marks
1454 1552 modified and added files as normal in the dirstate.
1455 1553
1456 1554 """
1457 1555
1458 1556 with self._repo.dirstate.parentchange():
1459 1557 for f in self.modified() + self.added():
1460 1558 self._repo.dirstate.normal(f)
1461 1559 for f in self.removed():
1462 1560 self._repo.dirstate.drop(f)
1463 1561 self._repo.dirstate.setparents(node)
1464 1562
1465 1563 # write changes out explicitly, because nesting wlock at
1466 1564 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1467 1565 # from immediately doing so for subsequent changing files
1468 1566 self._repo.dirstate.write(self._repo.currenttransaction())
1469 1567
1470 1568 class workingctx(committablectx):
1471 1569 """A workingctx object makes access to data related to
1472 1570 the current working directory convenient.
1473 1571 date - any valid date string or (unixtime, offset), or None.
1474 1572 user - username string, or None.
1475 1573 extra - a dictionary of extra values, or None.
1476 1574 changes - a list of file lists as returned by localrepo.status()
1477 1575 or None to use the repository status.
1478 1576 """
1479 1577 def __init__(self, repo, text="", user=None, date=None, extra=None,
1480 1578 changes=None):
1481 1579 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1482 1580
1483 1581 def __iter__(self):
1484 1582 d = self._repo.dirstate
1485 1583 for f in d:
1486 1584 if d[f] != 'r':
1487 1585 yield f
1488 1586
1489 1587 def __contains__(self, key):
1490 1588 return self._repo.dirstate[key] not in "?r"
1491 1589
1492 1590 def hex(self):
1493 1591 return hex(wdirid)
1494 1592
1495 1593 @propertycache
1496 1594 def _parents(self):
1497 1595 p = self._repo.dirstate.parents()
1498 1596 if p[1] == nullid:
1499 1597 p = p[:-1]
1500 1598 return [changectx(self._repo, x) for x in p]
1501 1599
1502 1600 def filectx(self, path, filelog=None):
1503 1601 """get a file context from the working directory"""
1504 1602 return workingfilectx(self._repo, path, workingctx=self,
1505 1603 filelog=filelog)
1506 1604
1507 1605 def dirty(self, missing=False, merge=True, branch=True):
1508 1606 "check whether a working directory is modified"
1509 1607 # check subrepos first
1510 1608 for s in sorted(self.substate):
1511 1609 if self.sub(s).dirty():
1512 1610 return True
1513 1611 # check current working dir
1514 1612 return ((merge and self.p2()) or
1515 1613 (branch and self.branch() != self.p1().branch()) or
1516 1614 self.modified() or self.added() or self.removed() or
1517 1615 (missing and self.deleted()))
1518 1616
1519 1617 def add(self, list, prefix=""):
1520 1618 join = lambda f: os.path.join(prefix, f)
1521 1619 with self._repo.wlock():
1522 1620 ui, ds = self._repo.ui, self._repo.dirstate
1523 1621 rejected = []
1524 1622 lstat = self._repo.wvfs.lstat
1525 1623 for f in list:
1526 1624 scmutil.checkportable(ui, join(f))
1527 1625 try:
1528 1626 st = lstat(f)
1529 1627 except OSError:
1530 1628 ui.warn(_("%s does not exist!\n") % join(f))
1531 1629 rejected.append(f)
1532 1630 continue
1533 1631 if st.st_size > 10000000:
1534 1632 ui.warn(_("%s: up to %d MB of RAM may be required "
1535 1633 "to manage this file\n"
1536 1634 "(use 'hg revert %s' to cancel the "
1537 1635 "pending addition)\n")
1538 1636 % (f, 3 * st.st_size // 1000000, join(f)))
1539 1637 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1540 1638 ui.warn(_("%s not added: only files and symlinks "
1541 1639 "supported currently\n") % join(f))
1542 1640 rejected.append(f)
1543 1641 elif ds[f] in 'amn':
1544 1642 ui.warn(_("%s already tracked!\n") % join(f))
1545 1643 elif ds[f] == 'r':
1546 1644 ds.normallookup(f)
1547 1645 else:
1548 1646 ds.add(f)
1549 1647 return rejected
1550 1648
1551 1649 def forget(self, files, prefix=""):
1552 1650 join = lambda f: os.path.join(prefix, f)
1553 1651 with self._repo.wlock():
1554 1652 rejected = []
1555 1653 for f in files:
1556 1654 if f not in self._repo.dirstate:
1557 1655 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1558 1656 rejected.append(f)
1559 1657 elif self._repo.dirstate[f] != 'a':
1560 1658 self._repo.dirstate.remove(f)
1561 1659 else:
1562 1660 self._repo.dirstate.drop(f)
1563 1661 return rejected
1564 1662
1565 1663 def undelete(self, list):
1566 1664 pctxs = self.parents()
1567 1665 with self._repo.wlock():
1568 1666 for f in list:
1569 1667 if self._repo.dirstate[f] != 'r':
1570 1668 self._repo.ui.warn(_("%s not removed!\n") % f)
1571 1669 else:
1572 1670 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1573 1671 t = fctx.data()
1574 1672 self._repo.wwrite(f, t, fctx.flags())
1575 1673 self._repo.dirstate.normal(f)
1576 1674
1577 1675 def copy(self, source, dest):
1578 1676 try:
1579 1677 st = self._repo.wvfs.lstat(dest)
1580 1678 except OSError as err:
1581 1679 if err.errno != errno.ENOENT:
1582 1680 raise
1583 1681 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1584 1682 return
1585 1683 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1586 1684 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1587 1685 "symbolic link\n") % dest)
1588 1686 else:
1589 1687 with self._repo.wlock():
1590 1688 if self._repo.dirstate[dest] in '?':
1591 1689 self._repo.dirstate.add(dest)
1592 1690 elif self._repo.dirstate[dest] in 'r':
1593 1691 self._repo.dirstate.normallookup(dest)
1594 1692 self._repo.dirstate.copy(source, dest)
1595 1693
1596 1694 def match(self, pats=None, include=None, exclude=None, default='glob',
1597 1695 listsubrepos=False, badfn=None):
1598 1696 if pats is None:
1599 1697 pats = []
1600 1698 r = self._repo
1601 1699
1602 1700 # Only a case-insensitive filesystem needs magic to translate user input
1603 1701 # to actual case in the filesystem.
1604 1702 icasefs = not util.fscasesensitive(r.root)
1605 1703 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1606 1704 default, auditor=r.auditor, ctx=self,
1607 1705 listsubrepos=listsubrepos, badfn=badfn,
1608 1706 icasefs=icasefs)
1609 1707
1610 1708 def _filtersuspectsymlink(self, files):
1611 1709 if not files or self._repo.dirstate._checklink:
1612 1710 return files
1613 1711
1614 1712 # Symlink placeholders may get non-symlink-like contents
1615 1713 # via user error or dereferencing by NFS or Samba servers,
1616 1714 # so we filter out any placeholders that don't look like a
1617 1715 # symlink
1618 1716 sane = []
1619 1717 for f in files:
1620 1718 if self.flags(f) == 'l':
1621 1719 d = self[f].data()
1622 1720 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1623 1721 self._repo.ui.debug('ignoring suspect symlink placeholder'
1624 1722 ' "%s"\n' % f)
1625 1723 continue
1626 1724 sane.append(f)
1627 1725 return sane
1628 1726
1629 1727 def _checklookup(self, files):
1630 1728 # check for any possibly clean files
1631 1729 if not files:
1632 1730 return [], []
1633 1731
1634 1732 modified = []
1635 1733 fixup = []
1636 1734 pctx = self._parents[0]
1637 1735 # do a full compare of any files that might have changed
1638 1736 for f in sorted(files):
1639 1737 if (f not in pctx or self.flags(f) != pctx.flags(f)
1640 1738 or pctx[f].cmp(self[f])):
1641 1739 modified.append(f)
1642 1740 else:
1643 1741 fixup.append(f)
1644 1742
1645 1743 # update dirstate for files that are actually clean
1646 1744 if fixup:
1647 1745 try:
1648 1746 # updating the dirstate is optional
1649 1747 # so we don't wait on the lock
1650 1748 # wlock can invalidate the dirstate, so cache normal _after_
1651 1749 # taking the lock
1652 1750 with self._repo.wlock(False):
1653 1751 normal = self._repo.dirstate.normal
1654 1752 for f in fixup:
1655 1753 normal(f)
1656 1754 # write changes out explicitly, because nesting
1657 1755 # wlock at runtime may prevent 'wlock.release()'
1658 1756 # after this block from doing so for subsequent
1659 1757 # changing files
1660 1758 self._repo.dirstate.write(self._repo.currenttransaction())
1661 1759 except error.LockError:
1662 1760 pass
1663 1761 return modified, fixup
1664 1762
1665 1763 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1666 1764 unknown=False):
1667 1765 '''Gets the status from the dirstate -- internal use only.'''
1668 1766 listignored, listclean, listunknown = ignored, clean, unknown
1669 1767 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1670 1768 subrepos = []
1671 1769 if '.hgsub' in self:
1672 1770 subrepos = sorted(self.substate)
1673 1771 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1674 1772 listclean, listunknown)
1675 1773
1676 1774 # check for any possibly clean files
1677 1775 if cmp:
1678 1776 modified2, fixup = self._checklookup(cmp)
1679 1777 s.modified.extend(modified2)
1680 1778
1681 1779 # update dirstate for files that are actually clean
1682 1780 if fixup and listclean:
1683 1781 s.clean.extend(fixup)
1684 1782
1685 1783 if match.always():
1686 1784 # cache for performance
1687 1785 if s.unknown or s.ignored or s.clean:
1688 1786 # "_status" is cached with list*=False in the normal route
1689 1787 self._status = scmutil.status(s.modified, s.added, s.removed,
1690 1788 s.deleted, [], [], [])
1691 1789 else:
1692 1790 self._status = s
1693 1791
1694 1792 return s
1695 1793
1696 1794 @propertycache
1697 1795 def _manifest(self):
1698 1796 """generate a manifest corresponding to the values in self._status
1699 1797
1700 1798 This reuses the file nodeids from the parent, but uses special node
1701 1799 identifiers for added and modified files. This is used by manifest
1702 1800 merge to see that files are different and by update logic to avoid
1703 1801 deleting newly added files.
1704 1802 """
1705 1803 return self._buildstatusmanifest(self._status)
1706 1804
1707 1805 def _buildstatusmanifest(self, status):
1708 1806 """Builds a manifest that includes the given status results."""
1709 1807 parents = self.parents()
1710 1808
1711 1809 man = parents[0].manifest().copy()
1712 1810
1713 1811 ff = self._flagfunc
1714 1812 for i, l in ((addednodeid, status.added),
1715 1813 (modifiednodeid, status.modified)):
1716 1814 for f in l:
1717 1815 man[f] = i
1718 1816 try:
1719 1817 man.setflag(f, ff(f))
1720 1818 except OSError:
1721 1819 pass
1722 1820
1723 1821 for f in status.deleted + status.removed:
1724 1822 if f in man:
1725 1823 del man[f]
1726 1824
1727 1825 return man
1728 1826
1729 1827 def _buildstatus(self, other, s, match, listignored, listclean,
1730 1828 listunknown):
1731 1829 """build a status with respect to another context
1732 1830
1733 1831 This includes logic for maintaining the fast path of status when
1734 1832 comparing the working directory against its parent: building a new
1735 1833 manifest is skipped when self (the working directory) is compared
1736 1834 against its parent (repo['.']).
1737 1835 """
1738 1836 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1739 1837 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1740 1838 # might have accidentally ended up with the entire contents of the file
1741 1839 # they are supposed to be linking to.
1742 1840 s.modified[:] = self._filtersuspectsymlink(s.modified)
1743 1841 if other != self._repo['.']:
1744 1842 s = super(workingctx, self)._buildstatus(other, s, match,
1745 1843 listignored, listclean,
1746 1844 listunknown)
1747 1845 return s
1748 1846
1749 1847 def _matchstatus(self, other, match):
1750 1848 """override the match method with a filter for directory patterns
1751 1849
1752 1850 We use inheritance to customize the match.bad method only in cases of
1753 1851 workingctx since it belongs only to the working directory when
1754 1852 comparing against the parent changeset.
1755 1853
1756 1854 If we aren't comparing against the working directory's parent, then we
1757 1855 just use the default match object sent to us.
1758 1856 """
1759 1857 superself = super(workingctx, self)
1760 1858 match = superself._matchstatus(other, match)
1761 1859 if other != self._repo['.']:
1762 1860 def bad(f, msg):
1763 1861 # 'f' may be a directory pattern from 'match.files()',
1764 1862 # so 'f not in ctx1' is not enough
1765 1863 if f not in other and not other.hasdir(f):
1766 1864 self._repo.ui.warn('%s: %s\n' %
1767 1865 (self._repo.dirstate.pathto(f), msg))
1768 1866 match.bad = bad
1769 1867 return match
1770 1868
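# Illustrative sketch (not part of context.py): typical workingctx usage.
# `repo` is assumed to be an already-open localrepo; the file name is made up.
def _example_workingctx_usage(repo):
    wctx = repo[None]                     # conventional way to get the workingctx
    if wctx.dirty(missing=True):          # any uncommitted (or missing) changes?
        changed = wctx.modified() + wctx.added() + wctx.removed()
    else:
        changed = []
    rejected = wctx.add(['newfile.txt'])  # schedule a hypothetical file for addition
    return changed, rejected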
1771 1869 class committablefilectx(basefilectx):
1772 1870 """A committablefilectx provides common functionality for a file context
1773 1871 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1774 1872 def __init__(self, repo, path, filelog=None, ctx=None):
1775 1873 self._repo = repo
1776 1874 self._path = path
1777 1875 self._changeid = None
1778 1876 self._filerev = self._filenode = None
1779 1877
1780 1878 if filelog is not None:
1781 1879 self._filelog = filelog
1782 1880 if ctx:
1783 1881 self._changectx = ctx
1784 1882
1785 1883 def __nonzero__(self):
1786 1884 return True
1787 1885
1788 1886 __bool__ = __nonzero__
1789 1887
1790 1888 def linkrev(self):
1791 1889 # linked to self._changectx no matter if file is modified or not
1792 1890 return self.rev()
1793 1891
1794 1892 def parents(self):
1795 1893 '''return parent filectxs, following copies if necessary'''
1796 1894 def filenode(ctx, path):
1797 1895 return ctx._manifest.get(path, nullid)
1798 1896
1799 1897 path = self._path
1800 1898 fl = self._filelog
1801 1899 pcl = self._changectx._parents
1802 1900 renamed = self.renamed()
1803 1901
1804 1902 if renamed:
1805 1903 pl = [renamed + (None,)]
1806 1904 else:
1807 1905 pl = [(path, filenode(pcl[0], path), fl)]
1808 1906
1809 1907 for pc in pcl[1:]:
1810 1908 pl.append((path, filenode(pc, path), fl))
1811 1909
1812 1910 return [self._parentfilectx(p, fileid=n, filelog=l)
1813 1911 for p, n, l in pl if n != nullid]
1814 1912
1815 1913 def children(self):
1816 1914 return []
1817 1915
1818 1916 class workingfilectx(committablefilectx):
1819 1917 """A workingfilectx object makes access to data related to a particular
1820 1918 file in the working directory convenient."""
1821 1919 def __init__(self, repo, path, filelog=None, workingctx=None):
1822 1920 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1823 1921
1824 1922 @propertycache
1825 1923 def _changectx(self):
1826 1924 return workingctx(self._repo)
1827 1925
1828 1926 def data(self):
1829 1927 return self._repo.wread(self._path)
1830 1928 def renamed(self):
1831 1929 rp = self._repo.dirstate.copied(self._path)
1832 1930 if not rp:
1833 1931 return None
1834 1932 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1835 1933
1836 1934 def size(self):
1837 1935 return self._repo.wvfs.lstat(self._path).st_size
1838 1936 def date(self):
1839 1937 t, tz = self._changectx.date()
1840 1938 try:
1841 1939 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1842 1940 except OSError as err:
1843 1941 if err.errno != errno.ENOENT:
1844 1942 raise
1845 1943 return (t, tz)
1846 1944
1847 1945 def cmp(self, fctx):
1848 1946 """compare with other file context
1849 1947
1850 1948 returns True if different than fctx.
1851 1949 """
1852 1950 # fctx should be a filectx (not a workingfilectx)
1853 1951 # invert comparison to reuse the same code path
1854 1952 return fctx.cmp(self)
1855 1953
1856 1954 def remove(self, ignoremissing=False):
1857 1955 """wraps unlink for a repo's working directory"""
1858 1956 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1859 1957
1860 1958 def write(self, data, flags):
1861 1959 """wraps repo.wwrite"""
1862 1960 self._repo.wwrite(self._path, data, flags)
1863 1961
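# Illustrative sketch (not part of context.py): file-level access through the
# working directory.  Assumes an open `repo` and a tracked file named 'README'
# (a made-up example path).
def _example_workingfilectx_usage(repo):
    fctx = repo[None]['README']       # a workingfilectx
    data = fctx.data()                # current on-disk contents
    copysource = fctx.renamed()       # (source path, filenode) or None
    return data, copysource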
1864 1962 class workingcommitctx(workingctx):
1865 1963 """A workingcommitctx object makes access to data related to
1866 1964 the revision being committed convenient.
1867 1965
1868 1966 This hides changes in the working directory, if they aren't
1869 1967 committed in this context.
1870 1968 """
1871 1969 def __init__(self, repo, changes,
1872 1970 text="", user=None, date=None, extra=None):
1873 1971 super(workingctx, self).__init__(repo, text, user, date, extra,
1874 1972 changes)
1875 1973
1876 1974 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1877 1975 unknown=False):
1878 1976 """Return matched files only in ``self._status``
1879 1977
1880 1978 Uncommitted files appear "clean" via this context, even if
1881 1979 they aren't actually so in the working directory.
1882 1980 """
1883 1981 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1884 1982 if clean:
1885 1983 clean = [f for f in self._manifest if f not in self._changedset]
1886 1984 else:
1887 1985 clean = []
1888 1986 return scmutil.status([f for f in self._status.modified if match(f)],
1889 1987 [f for f in self._status.added if match(f)],
1890 1988 [f for f in self._status.removed if match(f)],
1891 1989 [], [], [], clean)
1892 1990
1893 1991 @propertycache
1894 1992 def _changedset(self):
1895 1993 """Return the set of files changed in this context
1896 1994 """
1897 1995 changed = set(self._status.modified)
1898 1996 changed.update(self._status.added)
1899 1997 changed.update(self._status.removed)
1900 1998 return changed
1901 1999
1902 2000 def makecachingfilectxfn(func):
1903 2001 """Create a filectxfn that caches based on the path.
1904 2002
1905 2003 We can't use util.cachefunc because it uses all arguments as the cache
1906 2004 key and this creates a cycle since the arguments include the repo and
1907 2005 memctx.
1908 2006 """
1909 2007 cache = {}
1910 2008
1911 2009 def getfilectx(repo, memctx, path):
1912 2010 if path not in cache:
1913 2011 cache[path] = func(repo, memctx, path)
1914 2012 return cache[path]
1915 2013
1916 2014 return getfilectx
1917 2015
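# Illustrative sketch (not part of context.py): the wrapper above invokes the
# wrapped filectxfn at most once per path.  The repo/memctx arguments are
# passed through untouched, so None stands in for them here, and the return
# value is a dummy string instead of a memfilectx.
def _example_caching_filectxfn():
    calls = []
    def expensivefn(repo, memctx, path):
        calls.append(path)
        return 'contents of %s' % path
    cached = makecachingfilectxfn(expensivefn)
    cached(None, None, 'a')
    cached(None, None, 'a')           # second lookup is served from the cache
    assert calls == ['a']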
1918 2016 class memctx(committablectx):
1919 2017 """Use memctx to perform in-memory commits via localrepo.commitctx().
1920 2018
1921 2019 Revision information is supplied at initialization time, while
1922 2020 related file data is made available through a callback
1923 2021 mechanism. 'repo' is the current localrepo, 'parents' is a
1924 2022 sequence of two parent revisions identifiers (pass None for every
1925 2023 missing parent), 'text' is the commit message and 'files' lists
1926 2024 names of files touched by the revision (normalized and relative to
1927 2025 repository root).
1928 2026
1929 2027 filectxfn(repo, memctx, path) is a callable receiving the
1930 2028 repository, the current memctx object and the normalized path of
1931 2029 requested file, relative to repository root. It is fired by the
1932 2030 commit function for every file in 'files', but calls order is
1933 2031 undefined. If the file is available in the revision being
1934 2032 committed (updated or added), filectxfn returns a memfilectx
1935 2033 object. If the file was removed, filectxfn returns None for recent
1936 2034 Mercurial. Moved files are represented by marking the source file
1937 2035 removed and the new file added with copy information (see
1938 2036 memfilectx).
1939 2037
1940 2038 user receives the committer name and defaults to current
1941 2039 repository username, date is the commit date in any format
1942 2040 supported by util.parsedate() and defaults to current date, extra
1943 2041 is a dictionary of metadata or is left empty.
1944 2042 """
1945 2043
1946 2044 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1947 2045 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1948 2046 # this field to determine what to do in filectxfn.
1949 2047 _returnnoneformissingfiles = True
1950 2048
1951 2049 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1952 2050 date=None, extra=None, editor=False):
1953 2051 super(memctx, self).__init__(repo, text, user, date, extra)
1954 2052 self._rev = None
1955 2053 self._node = None
1956 2054 parents = [(p or nullid) for p in parents]
1957 2055 p1, p2 = parents
1958 2056 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1959 2057 files = sorted(set(files))
1960 2058 self._files = files
1961 2059 self.substate = {}
1962 2060
1963 2061 # if store is not callable, wrap it in a function
1964 2062 if not callable(filectxfn):
1965 2063 def getfilectx(repo, memctx, path):
1966 2064 fctx = filectxfn[path]
1967 2065 # this is weird but apparently we only keep track of one parent
1968 2066 # (why not only store that instead of a tuple?)
1969 2067 copied = fctx.renamed()
1970 2068 if copied:
1971 2069 copied = copied[0]
1972 2070 return memfilectx(repo, path, fctx.data(),
1973 2071 islink=fctx.islink(), isexec=fctx.isexec(),
1974 2072 copied=copied, memctx=memctx)
1975 2073 self._filectxfn = getfilectx
1976 2074 else:
1977 2075 # memoizing increases performance for e.g. vcs convert scenarios.
1978 2076 self._filectxfn = makecachingfilectxfn(filectxfn)
1979 2077
1980 2078 if extra:
1981 2079 self._extra = extra.copy()
1982 2080 else:
1983 2081 self._extra = {}
1984 2082
1985 2083 if self._extra.get('branch', '') == '':
1986 2084 self._extra['branch'] = 'default'
1987 2085
1988 2086 if editor:
1989 2087 self._text = editor(self._repo, self, [])
1990 2088 self._repo.savecommitmessage(self._text)
1991 2089
1992 2090 def filectx(self, path, filelog=None):
1993 2091 """get a file context from the working directory
1994 2092
1995 2093 Returns None if file doesn't exist and should be removed."""
1996 2094 return self._filectxfn(self._repo, self, path)
1997 2095
1998 2096 def commit(self):
1999 2097 """commit context to the repo"""
2000 2098 return self._repo.commitctx(self)
2001 2099
2002 2100 @propertycache
2003 2101 def _manifest(self):
2004 2102 """generate a manifest based on the return values of filectxfn"""
2005 2103
2006 2104 # keep this simple for now; just worry about p1
2007 2105 pctx = self._parents[0]
2008 2106 man = pctx.manifest().copy()
2009 2107
2010 2108 for f in self._status.modified:
2011 2109 p1node = nullid
2012 2110 p2node = nullid
2013 2111 p = pctx[f].parents() # if file isn't in pctx, check p2?
2014 2112 if len(p) > 0:
2015 2113 p1node = p[0].filenode()
2016 2114 if len(p) > 1:
2017 2115 p2node = p[1].filenode()
2018 2116 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2019 2117
2020 2118 for f in self._status.added:
2021 2119 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2022 2120
2023 2121 for f in self._status.removed:
2024 2122 if f in man:
2025 2123 del man[f]
2026 2124
2027 2125 return man
2028 2126
2029 2127 @propertycache
2030 2128 def _status(self):
2031 2129 """Calculate exact status from ``files`` specified at construction
2032 2130 """
2033 2131 man1 = self.p1().manifest()
2034 2132 p2 = self._parents[1]
2035 2133 # "1 < len(self._parents)" can't be used for checking
2036 2134 # existence of the 2nd parent, because "memctx._parents" is
2037 2135 # explicitly initialized as a list of length 2.
2038 2136 if p2.node() != nullid:
2039 2137 man2 = p2.manifest()
2040 2138 managing = lambda f: f in man1 or f in man2
2041 2139 else:
2042 2140 managing = lambda f: f in man1
2043 2141
2044 2142 modified, added, removed = [], [], []
2045 2143 for f in self._files:
2046 2144 if not managing(f):
2047 2145 added.append(f)
2048 2146 elif self[f]:
2049 2147 modified.append(f)
2050 2148 else:
2051 2149 removed.append(f)
2052 2150
2053 2151 return scmutil.status(modified, added, removed, [], [], [], [])
2054 2152
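# Illustrative sketch (not part of context.py): creating a changeset without
# touching the working directory.  Assumes an open `repo`; paths, contents and
# the user name are made up.
def _example_memctx_commit(repo):
    def filectxfn(repo, memctx_, path):
        if path == 'removed.txt':
            return None               # report the file as removed
        return memfilectx(repo, path, 'new contents of %s\n' % path,
                          islink=False, isexec=False, memctx=memctx_)
    parents = (repo['.'].node(), None)    # None becomes the null parent
    ctx = memctx(repo, parents, 'example in-memory commit',
                 ['touched.txt', 'removed.txt'], filectxfn,
                 user='example user <user@example.com>')
    return repo.commitctx(ctx)            # node of the new changeset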
2055 2153 class memfilectx(committablefilectx):
2056 2154 """memfilectx represents an in-memory file to commit.
2057 2155
2058 2156 See memctx and committablefilectx for more details.
2059 2157 """
2060 2158 def __init__(self, repo, path, data, islink=False,
2061 2159 isexec=False, copied=None, memctx=None):
2062 2160 """
2063 2161 path is the normalized file path relative to repository root.
2064 2162 data is the file content as a string.
2065 2163 islink is True if the file is a symbolic link.
2066 2164 isexec is True if the file is executable.
2067 2165 copied is the source file path if current file was copied in the
2068 2166 revision being committed, or None."""
2069 2167 super(memfilectx, self).__init__(repo, path, None, memctx)
2070 2168 self._data = data
2071 2169 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2072 2170 self._copied = None
2073 2171 if copied:
2074 2172 self._copied = (copied, nullid)
2075 2173
2076 2174 def data(self):
2077 2175 return self._data
2078 2176
2079 2177 def remove(self, ignoremissing=False):
2080 2178 """wraps unlink for a repo's working directory"""
2081 2179 # need to figure out what to do here
2082 2180 del self._changectx[self._path]
2083 2181
2084 2182 def write(self, data, flags):
2085 2183 """wraps repo.wwrite"""
2086 2184 self._data = data
2087 2185
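# Illustrative sketch (not part of context.py): expressing a rename inside a
# memctx.  The destination carries copy metadata; the enclosing memctx should
# also list the source path and have its filectxfn return None for it.  Paths
# are made up.
def _example_renamed_memfilectx(repo, memctx_, data):
    return memfilectx(repo, 'docs/new-name.txt', data,
                      islink=False, isexec=False,
                      copied='docs/old-name.txt', memctx=memctx_)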
2088 2186 class overlayfilectx(committablefilectx):
2089 2187 """Like memfilectx but take an original filectx and optional parameters to
2090 2188 override parts of it. This is useful when fctx.data() is expensive (e.g.
2091 2189 the flag processor is expensive) and raw data, flags, and filenode can be
2092 2190 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2093 2191 """
2094 2192
2095 2193 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2096 2194 copied=None, ctx=None):
2097 2195 """originalfctx: filecontext to duplicate
2098 2196
2099 2197 datafunc: None or a function to override data (file content). It is a
2100 2198 function so the data can be computed lazily. path, flags, copied, ctx: None or an overridden value
2101 2199
2102 2200 copied could be (path, rev), or False. copied could also be just path,
2103 2201 and will be converted to (path, nullid). This simplifies some callers.
2104 2202 """
2105 2203
2106 2204 if path is None:
2107 2205 path = originalfctx.path()
2108 2206 if ctx is None:
2109 2207 ctx = originalfctx.changectx()
2110 2208 ctxmatch = lambda: True
2111 2209 else:
2112 2210 ctxmatch = lambda: ctx == originalfctx.changectx()
2113 2211
2114 2212 repo = originalfctx.repo()
2115 2213 flog = originalfctx.filelog()
2116 2214 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2117 2215
2118 2216 if copied is None:
2119 2217 copied = originalfctx.renamed()
2120 2218 copiedmatch = lambda: True
2121 2219 else:
2122 2220 if copied and not isinstance(copied, tuple):
2123 2221 # repo._filecommit will recalculate copyrev so nullid is okay
2124 2222 copied = (copied, nullid)
2125 2223 copiedmatch = lambda: copied == originalfctx.renamed()
2126 2224
2127 2225 # When data, copied (could affect data), ctx (could affect filelog
2128 2226 # parents) are not overridden, rawdata, rawflags, and filenode may be
2129 2227 # reused (repo._filecommit should double check filelog parents).
2130 2228 #
2131 2229 # path, flags are not hashed in filelog (but in manifestlog) so they do
2132 2230 # not affect reusability here.
2133 2231 #
2134 2232 # If ctx or copied is overridden to the same value as in originalfctx,
2135 2233 # it is still considered reusable. originalfctx.renamed() may be a bit
2136 2234 # expensive so it's not called unless necessary. Assuming datafunc is
2137 2235 # always expensive, do not call it for this "reusable" test.
2138 2236 reusable = datafunc is None and ctxmatch() and copiedmatch()
2139 2237
2140 2238 if datafunc is None:
2141 2239 datafunc = originalfctx.data
2142 2240 if flags is None:
2143 2241 flags = originalfctx.flags()
2144 2242
2145 2243 self._datafunc = datafunc
2146 2244 self._flags = flags
2147 2245 self._copied = copied
2148 2246
2149 2247 if reusable:
2150 2248 # copy extra fields from originalfctx
2151 2249 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2152 2250 for attr in attrs:
2153 2251 if util.safehasattr(originalfctx, attr):
2154 2252 setattr(self, attr, getattr(originalfctx, attr))
2155 2253
2156 2254 def data(self):
2157 2255 return self._datafunc()
2158 2256
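# Illustrative sketch (not part of context.py): a mode-only change that reuses
# the original raw data and filenode.  `fctx` is assumed to be a filectx from
# an existing changeset, e.g. repo['.']['script.sh'] (made-up path).
def _example_overlay_exec_bit(fctx):
    # only flags are overridden, so data, copied and ctx remain reusable
    return overlayfilectx(fctx, flags='x')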
2159 2257 class metadataonlyctx(committablectx):
2160 2258 """Like memctx but it's reusing the manifest of different commit.
2161 2259 Intended to be used by lightweight operations that are creating
2162 2260 metadata-only changes.
2163 2261
2164 2262 Revision information is supplied at initialization time. 'repo' is the
2165 2263 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2166 2264 'parents' is a sequence of two parent revisions identifiers (pass None for
2167 2265 every missing parent), 'text' is the commit message.
2168 2266
2169 2267 user receives the committer name and defaults to current repository
2170 2268 username, date is the commit date in any format supported by
2171 2269 util.parsedate() and defaults to current date, extra is a dictionary of
2172 2270 metadata or is left empty.
2173 2271 """
2174 2272 def __new__(cls, repo, originalctx, *args, **kwargs):
2175 2273 return super(metadataonlyctx, cls).__new__(cls, repo)
2176 2274
2177 2275 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2178 2276 extra=None, editor=False):
2179 2277 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2180 2278 self._rev = None
2181 2279 self._node = None
2182 2280 self._originalctx = originalctx
2183 2281 self._manifestnode = originalctx.manifestnode()
2184 2282 parents = [(p or nullid) for p in parents]
2185 2283 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2186 2284
2187 2285 # sanity check to ensure that the reused manifest parents are
2188 2286 # manifests of our commit parents
2189 2287 mp1, mp2 = self.manifestctx().parents
2190 2288 if p1 != nullid and p1.manifestnode() != mp1:
2191 2289 raise RuntimeError('can\'t reuse the manifest: '
2192 2290 'its p1 doesn\'t match the new ctx p1')
2193 2291 if p2 != nullid and p2.manifestnode() != mp2:
2194 2292 raise RuntimeError('can\'t reuse the manifest: '
2195 2293 'its p2 doesn\'t match the new ctx p2')
2196 2294
2197 2295 self._files = originalctx.files()
2198 2296 self.substate = {}
2199 2297
2200 2298 if extra:
2201 2299 self._extra = extra.copy()
2202 2300 else:
2203 2301 self._extra = {}
2204 2302
2205 2303 if self._extra.get('branch', '') == '':
2206 2304 self._extra['branch'] = 'default'
2207 2305
2208 2306 if editor:
2209 2307 self._text = editor(self._repo, self, [])
2210 2308 self._repo.savecommitmessage(self._text)
2211 2309
2212 2310 def manifestnode(self):
2213 2311 return self._manifestnode
2214 2312
2215 2313 @propertycache
2216 2314 def _manifestctx(self):
2217 2315 return self._repo.manifestlog[self._manifestnode]
2218 2316
2219 2317 def filectx(self, path, filelog=None):
2220 2318 return self._originalctx.filectx(path, filelog=filelog)
2221 2319
2222 2320 def commit(self):
2223 2321 """commit context to the repo"""
2224 2322 return self._repo.commitctx(self)
2225 2323
2226 2324 @property
2227 2325 def _manifest(self):
2228 2326 return self._originalctx.manifest()
2229 2327
2230 2328 @propertycache
2231 2329 def _status(self):
2232 2330 """Calculate exact status from ``files`` specified in the ``origctx``
2233 2331 and parents manifests.
2234 2332 """
2235 2333 man1 = self.p1().manifest()
2236 2334 p2 = self._parents[1]
2237 2335 # "1 < len(self._parents)" can't be used for checking
2238 2336 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2239 2337 # explicitly initialized as a list of length 2.
2240 2338 if p2.node() != nullid:
2241 2339 man2 = p2.manifest()
2242 2340 managing = lambda f: f in man1 or f in man2
2243 2341 else:
2244 2342 managing = lambda f: f in man1
2245 2343
2246 2344 modified, added, removed = [], [], []
2247 2345 for f in self._files:
2248 2346 if not managing(f):
2249 2347 added.append(f)
2250 2348 elif self[f]:
2251 2349 modified.append(f)
2252 2350 else:
2253 2351 removed.append(f)
2254 2352
2255 2353 return scmutil.status(modified, added, removed, [], [], [], [])
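# Illustrative sketch (not part of context.py): rewriting only the metadata of
# an existing changeset (here its description) while reusing its manifest,
# following the constructor documented above.  Assumes an open `repo`; `rev`
# identifies the changeset to reword.
def _example_reword(repo, rev, newtext):
    old = repo[rev]
    new = metadataonlyctx(repo, old,
                          parents=(old.p1().node(), old.p2().node()),
                          text=newtext, user=old.user(), date=old.date(),
                          extra=old.extra())
    return repo.commitctx(new)            # node of the rewritten changeset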
@@ -1,56 +1,57 b''
1 1 # this is a hack to make sure no escape characters are inserted into the output
2 2
3 3 from __future__ import absolute_import
4 4
5 5 import doctest
6 6 import os
7 7 import sys
8 8
9 9 ispy3 = (sys.version_info[0] >= 3)
10 10
11 11 if 'TERM' in os.environ:
12 12 del os.environ['TERM']
13 13
14 14 # TODO: migrate doctests to py3 and enable them on both versions
15 15 def testmod(name, optionflags=0, testtarget=None, py2=True, py3=False):
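    # run the doctests only when the module is flagged as runnable under the
    # executing major Python version (py2=True under Python 2, py3=True under
    # Python 3); otherwise skip it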
16 16 if not (not ispy3 and py2 or ispy3 and py3):
17 17 return
18 18 __import__(name)
19 19 mod = sys.modules[name]
20 20 if testtarget is not None:
21 21 mod = getattr(mod, testtarget)
22 22 doctest.testmod(mod, optionflags=optionflags)
23 23
24 24 testmod('mercurial.changegroup')
25 25 testmod('mercurial.changelog')
26 26 testmod('mercurial.color')
27 27 testmod('mercurial.config')
28 testmod('mercurial.context')
28 29 testmod('mercurial.dagparser', optionflags=doctest.NORMALIZE_WHITESPACE)
29 30 testmod('mercurial.dispatch')
30 31 testmod('mercurial.encoding')
31 32 testmod('mercurial.formatter')
32 33 testmod('mercurial.hg')
33 34 testmod('mercurial.hgweb.hgwebdir_mod')
34 35 testmod('mercurial.match')
35 36 testmod('mercurial.mdiff')
36 37 testmod('mercurial.minirst')
37 38 testmod('mercurial.patch')
38 39 testmod('mercurial.pathutil')
39 40 testmod('mercurial.parser')
40 41 testmod('mercurial.pycompat', py3=True)
41 42 testmod('mercurial.revsetlang')
42 43 testmod('mercurial.smartset')
43 44 testmod('mercurial.store')
44 45 testmod('mercurial.subrepo')
45 46 testmod('mercurial.templatefilters')
46 47 testmod('mercurial.templater')
47 48 testmod('mercurial.ui')
48 49 testmod('mercurial.url')
49 50 testmod('mercurial.util')
50 51 testmod('mercurial.util', testtarget='platform')
51 52 testmod('hgext.convert.convcmd')
52 53 testmod('hgext.convert.cvsps')
53 54 testmod('hgext.convert.filemap')
54 55 testmod('hgext.convert.p4')
55 56 testmod('hgext.convert.subversion')
56 57 testmod('hgext.mq')