# Provenance: Mercurial stable branch, changeset r36556:0a7c59a4 by Yuya Nishihara.
# Commit message: "annotate: do not poorly split lines at CR (issue5798)"
# Below: mercurial/context.py as of that revision (partial view).
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirnodes,
27 27 wdirrev,
28 28 )
29 29 from .thirdparty import (
30 30 attr,
31 31 )
32 32 from . import (
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 mdiff,
38 38 obsolete as obsmod,
39 39 obsutil,
40 40 patch,
41 41 pathutil,
42 42 phases,
43 43 pycompat,
44 44 repoview,
45 45 revlog,
46 46 scmutil,
47 47 sparse,
48 48 subrepo,
49 49 util,
50 50 )
51 51
52 52 propertycache = util.propertycache
53 53
54 54 nonascii = re.compile(r'[^\x21-\x7f]').search
55 55
56 56 class basectx(object):
57 57 """A basectx object represents the common logic for its children:
58 58 changectx: read-only context that is already present in the repo,
59 59 workingctx: a context that represents the working directory and can
60 60 be committed,
61 61 memctx: a context that represents changes in-memory and can also
62 62 be committed."""
63 63 def __new__(cls, repo, changeid='', *args, **kwargs):
64 64 if isinstance(changeid, basectx):
65 65 return changeid
66 66
67 67 o = super(basectx, cls).__new__(cls)
68 68
69 69 o._repo = repo
70 70 o._rev = nullrev
71 71 o._node = nullid
72 72
73 73 return o
74 74
75 75 def __bytes__(self):
76 76 return short(self.node())
77 77
78 78 __str__ = encoding.strmethod(__bytes__)
79 79
80 80 def __int__(self):
81 81 return self.rev()
82 82
83 83 def __repr__(self):
84 84 return r"<%s %s>" % (type(self).__name__, str(self))
85 85
86 86 def __eq__(self, other):
87 87 try:
88 88 return type(self) == type(other) and self._rev == other._rev
89 89 except AttributeError:
90 90 return False
91 91
92 92 def __ne__(self, other):
93 93 return not (self == other)
94 94
95 95 def __contains__(self, key):
96 96 return key in self._manifest
97 97
98 98 def __getitem__(self, key):
99 99 return self.filectx(key)
100 100
101 101 def __iter__(self):
102 102 return iter(self._manifest)
103 103
104 104 def _buildstatusmanifest(self, status):
105 105 """Builds a manifest that includes the given status results, if this is
106 106 a working copy context. For non-working copy contexts, it just returns
107 107 the normal manifest."""
108 108 return self.manifest()
109 109
110 110 def _matchstatus(self, other, match):
111 111 """This internal method provides a way for child objects to override the
112 112 match operator.
113 113 """
114 114 return match
115 115
116 116 def _buildstatus(self, other, s, match, listignored, listclean,
117 117 listunknown):
118 118 """build a status with respect to another context"""
119 119 # Load earliest manifest first for caching reasons. More specifically,
120 120 # if you have revisions 1000 and 1001, 1001 is probably stored as a
121 121 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
122 122 # 1000 and cache it so that when you read 1001, we just need to apply a
123 123 # delta to what's in the cache. So that's one full reconstruction + one
124 124 # delta application.
125 125 mf2 = None
126 126 if self.rev() is not None and self.rev() < other.rev():
127 127 mf2 = self._buildstatusmanifest(s)
128 128 mf1 = other._buildstatusmanifest(s)
129 129 if mf2 is None:
130 130 mf2 = self._buildstatusmanifest(s)
131 131
132 132 modified, added = [], []
133 133 removed = []
134 134 clean = []
135 135 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
136 136 deletedset = set(deleted)
137 137 d = mf1.diff(mf2, match=match, clean=listclean)
138 138 for fn, value in d.iteritems():
139 139 if fn in deletedset:
140 140 continue
141 141 if value is None:
142 142 clean.append(fn)
143 143 continue
144 144 (node1, flag1), (node2, flag2) = value
145 145 if node1 is None:
146 146 added.append(fn)
147 147 elif node2 is None:
148 148 removed.append(fn)
149 149 elif flag1 != flag2:
150 150 modified.append(fn)
151 151 elif node2 not in wdirnodes:
152 152 # When comparing files between two commits, we save time by
153 153 # not comparing the file contents when the nodeids differ.
154 154 # Note that this means we incorrectly report a reverted change
155 155 # to a file as a modification.
156 156 modified.append(fn)
157 157 elif self[fn].cmp(other[fn]):
158 158 modified.append(fn)
159 159 else:
160 160 clean.append(fn)
161 161
162 162 if removed:
163 163 # need to filter files if they are already reported as removed
164 164 unknown = [fn for fn in unknown if fn not in mf1 and
165 165 (not match or match(fn))]
166 166 ignored = [fn for fn in ignored if fn not in mf1 and
167 167 (not match or match(fn))]
168 168 # if they're deleted, don't report them as removed
169 169 removed = [fn for fn in removed if fn not in deletedset]
170 170
171 171 return scmutil.status(modified, added, removed, deleted, unknown,
172 172 ignored, clean)
173 173
174 174 @propertycache
175 175 def substate(self):
176 176 return subrepo.state(self, self._repo.ui)
177 177
178 178 def subrev(self, subpath):
179 179 return self.substate[subpath][1]
180 180
181 181 def rev(self):
182 182 return self._rev
183 183 def node(self):
184 184 return self._node
185 185 def hex(self):
186 186 return hex(self.node())
187 187 def manifest(self):
188 188 return self._manifest
189 189 def manifestctx(self):
190 190 return self._manifestctx
191 191 def repo(self):
192 192 return self._repo
193 193 def phasestr(self):
194 194 return phases.phasenames[self.phase()]
195 195 def mutable(self):
196 196 return self.phase() > phases.public
197 197
198 198 def getfileset(self, expr):
199 199 return fileset.getfileset(self, expr)
200 200
201 201 def obsolete(self):
202 202 """True if the changeset is obsolete"""
203 203 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
204 204
205 205 def extinct(self):
206 206 """True if the changeset is extinct"""
207 207 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
208 208
209 209 def unstable(self):
210 210 msg = ("'context.unstable' is deprecated, "
211 211 "use 'context.orphan'")
212 212 self._repo.ui.deprecwarn(msg, '4.4')
213 213 return self.orphan()
214 214
215 215 def orphan(self):
216 216 """True if the changeset is not obsolete but it's ancestor are"""
217 217 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
218 218
219 219 def bumped(self):
220 220 msg = ("'context.bumped' is deprecated, "
221 221 "use 'context.phasedivergent'")
222 222 self._repo.ui.deprecwarn(msg, '4.4')
223 223 return self.phasedivergent()
224 224
225 225 def phasedivergent(self):
226 226 """True if the changeset try to be a successor of a public changeset
227 227
228 228 Only non-public and non-obsolete changesets may be bumped.
229 229 """
230 230 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
231 231
232 232 def divergent(self):
233 233 msg = ("'context.divergent' is deprecated, "
234 234 "use 'context.contentdivergent'")
235 235 self._repo.ui.deprecwarn(msg, '4.4')
236 236 return self.contentdivergent()
237 237
238 238 def contentdivergent(self):
239 239 """Is a successors of a changeset with multiple possible successors set
240 240
241 241 Only non-public and non-obsolete changesets may be divergent.
242 242 """
243 243 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
244 244
245 245 def troubled(self):
246 246 msg = ("'context.troubled' is deprecated, "
247 247 "use 'context.isunstable'")
248 248 self._repo.ui.deprecwarn(msg, '4.4')
249 249 return self.isunstable()
250 250
251 251 def isunstable(self):
252 252 """True if the changeset is either unstable, bumped or divergent"""
253 253 return self.orphan() or self.phasedivergent() or self.contentdivergent()
254 254
255 255 def troubles(self):
256 256 """Keep the old version around in order to avoid breaking extensions
257 257 about different return values.
258 258 """
259 259 msg = ("'context.troubles' is deprecated, "
260 260 "use 'context.instabilities'")
261 261 self._repo.ui.deprecwarn(msg, '4.4')
262 262
263 263 troubles = []
264 264 if self.orphan():
265 265 troubles.append('orphan')
266 266 if self.phasedivergent():
267 267 troubles.append('bumped')
268 268 if self.contentdivergent():
269 269 troubles.append('divergent')
270 270 return troubles
271 271
272 272 def instabilities(self):
273 273 """return the list of instabilities affecting this changeset.
274 274
275 275 Instabilities are returned as strings. possible values are:
276 276 - orphan,
277 277 - phase-divergent,
278 278 - content-divergent.
279 279 """
280 280 instabilities = []
281 281 if self.orphan():
282 282 instabilities.append('orphan')
283 283 if self.phasedivergent():
284 284 instabilities.append('phase-divergent')
285 285 if self.contentdivergent():
286 286 instabilities.append('content-divergent')
287 287 return instabilities
288 288
289 289 def parents(self):
290 290 """return contexts for each parent changeset"""
291 291 return self._parents
292 292
293 293 def p1(self):
294 294 return self._parents[0]
295 295
296 296 def p2(self):
297 297 parents = self._parents
298 298 if len(parents) == 2:
299 299 return parents[1]
300 300 return changectx(self._repo, nullrev)
301 301
302 302 def _fileinfo(self, path):
303 303 if r'_manifest' in self.__dict__:
304 304 try:
305 305 return self._manifest[path], self._manifest.flags(path)
306 306 except KeyError:
307 307 raise error.ManifestLookupError(self._node, path,
308 308 _('not found in manifest'))
309 309 if r'_manifestdelta' in self.__dict__ or path in self.files():
310 310 if path in self._manifestdelta:
311 311 return (self._manifestdelta[path],
312 312 self._manifestdelta.flags(path))
313 313 mfl = self._repo.manifestlog
314 314 try:
315 315 node, flag = mfl[self._changeset.manifest].find(path)
316 316 except KeyError:
317 317 raise error.ManifestLookupError(self._node, path,
318 318 _('not found in manifest'))
319 319
320 320 return node, flag
321 321
322 322 def filenode(self, path):
323 323 return self._fileinfo(path)[0]
324 324
325 325 def flags(self, path):
326 326 try:
327 327 return self._fileinfo(path)[1]
328 328 except error.LookupError:
329 329 return ''
330 330
331 331 def sub(self, path, allowcreate=True):
332 332 '''return a subrepo for the stored revision of path, never wdir()'''
333 333 return subrepo.subrepo(self, path, allowcreate=allowcreate)
334 334
335 335 def nullsub(self, path, pctx):
336 336 return subrepo.nullsubrepo(self, path, pctx)
337 337
338 338 def workingsub(self, path):
339 339 '''return a subrepo for the stored revision, or wdir if this is a wdir
340 340 context.
341 341 '''
342 342 return subrepo.subrepo(self, path, allowwdir=True)
343 343
344 344 def match(self, pats=None, include=None, exclude=None, default='glob',
345 345 listsubrepos=False, badfn=None):
346 346 r = self._repo
347 347 return matchmod.match(r.root, r.getcwd(), pats,
348 348 include, exclude, default,
349 349 auditor=r.nofsauditor, ctx=self,
350 350 listsubrepos=listsubrepos, badfn=badfn)
351 351
352 352 def diff(self, ctx2=None, match=None, **opts):
353 353 """Returns a diff generator for the given contexts and matcher"""
354 354 if ctx2 is None:
355 355 ctx2 = self.p1()
356 356 if ctx2 is not None:
357 357 ctx2 = self._repo[ctx2]
358 358 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
359 359 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
360 360
361 361 def dirs(self):
362 362 return self._manifest.dirs()
363 363
364 364 def hasdir(self, dir):
365 365 return self._manifest.hasdir(dir)
366 366
367 367 def status(self, other=None, match=None, listignored=False,
368 368 listclean=False, listunknown=False, listsubrepos=False):
369 369 """return status of files between two nodes or node and working
370 370 directory.
371 371
372 372 If other is None, compare this node with working directory.
373 373
374 374 returns (modified, added, removed, deleted, unknown, ignored, clean)
375 375 """
376 376
377 377 ctx1 = self
378 378 ctx2 = self._repo[other]
379 379
380 380 # This next code block is, admittedly, fragile logic that tests for
381 381 # reversing the contexts and wouldn't need to exist if it weren't for
382 382 # the fast (and common) code path of comparing the working directory
383 383 # with its first parent.
384 384 #
385 385 # What we're aiming for here is the ability to call:
386 386 #
387 387 # workingctx.status(parentctx)
388 388 #
389 389 # If we always built the manifest for each context and compared those,
390 390 # then we'd be done. But the special case of the above call means we
391 391 # just copy the manifest of the parent.
392 392 reversed = False
393 393 if (not isinstance(ctx1, changectx)
394 394 and isinstance(ctx2, changectx)):
395 395 reversed = True
396 396 ctx1, ctx2 = ctx2, ctx1
397 397
398 398 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
399 399 match = ctx2._matchstatus(ctx1, match)
400 400 r = scmutil.status([], [], [], [], [], [], [])
401 401 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
402 402 listunknown)
403 403
404 404 if reversed:
405 405 # Reverse added and removed. Clear deleted, unknown and ignored as
406 406 # these make no sense to reverse.
407 407 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
408 408 r.clean)
409 409
410 410 if listsubrepos:
411 411 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
412 412 try:
413 413 rev2 = ctx2.subrev(subpath)
414 414 except KeyError:
415 415 # A subrepo that existed in node1 was deleted between
416 416 # node1 and node2 (inclusive). Thus, ctx2's substate
417 417 # won't contain that subpath. The best we can do ignore it.
418 418 rev2 = None
419 419 submatch = matchmod.subdirmatcher(subpath, match)
420 420 s = sub.status(rev2, match=submatch, ignored=listignored,
421 421 clean=listclean, unknown=listunknown,
422 422 listsubrepos=True)
423 423 for rfiles, sfiles in zip(r, s):
424 424 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
425 425
426 426 for l in r:
427 427 l.sort()
428 428
429 429 return r
430 430
431 431 def _filterederror(repo, changeid):
432 432 """build an exception to be raised about a filtered changeid
433 433
434 434 This is extracted in a function to help extensions (eg: evolve) to
435 435 experiment with various message variants."""
436 436 if repo.filtername.startswith('visible'):
437 437
438 438 # Check if the changeset is obsolete
439 439 unfilteredrepo = repo.unfiltered()
440 440 ctx = unfilteredrepo[changeid]
441 441
442 442 # If the changeset is obsolete, enrich the message with the reason
443 443 # that made this changeset not visible
444 444 if ctx.obsolete():
445 445 msg = obsutil._getfilteredreason(repo, changeid, ctx)
446 446 else:
447 447 msg = _("hidden revision '%s'") % changeid
448 448
449 449 hint = _('use --hidden to access hidden revisions')
450 450
451 451 return error.FilteredRepoLookupError(msg, hint=hint)
452 452 msg = _("filtered revision '%s' (not in '%s' subset)")
453 453 msg %= (changeid, repo.filtername)
454 454 return error.FilteredRepoLookupError(msg)
455 455
456 456 class changectx(basectx):
457 457 """A changecontext object makes access to data related to a particular
458 458 changeset convenient. It represents a read-only context already present in
459 459 the repo."""
460 460 def __init__(self, repo, changeid=''):
461 461 """changeid is a revision number, node, or tag"""
462 462
463 463 # since basectx.__new__ already took care of copying the object, we
464 464 # don't need to do anything in __init__, so we just exit here
465 465 if isinstance(changeid, basectx):
466 466 return
467 467
468 468 if changeid == '':
469 469 changeid = '.'
470 470 self._repo = repo
471 471
472 472 try:
473 473 if isinstance(changeid, int):
474 474 self._node = repo.changelog.node(changeid)
475 475 self._rev = changeid
476 476 return
477 477 if not pycompat.ispy3 and isinstance(changeid, long):
478 478 changeid = str(changeid)
479 479 if changeid == 'null':
480 480 self._node = nullid
481 481 self._rev = nullrev
482 482 return
483 483 if changeid == 'tip':
484 484 self._node = repo.changelog.tip()
485 485 self._rev = repo.changelog.rev(self._node)
486 486 return
487 487 if (changeid == '.'
488 488 or repo.local() and changeid == repo.dirstate.p1()):
489 489 # this is a hack to delay/avoid loading obsmarkers
490 490 # when we know that '.' won't be hidden
491 491 self._node = repo.dirstate.p1()
492 492 self._rev = repo.unfiltered().changelog.rev(self._node)
493 493 return
494 494 if len(changeid) == 20:
495 495 try:
496 496 self._node = changeid
497 497 self._rev = repo.changelog.rev(changeid)
498 498 return
499 499 except error.FilteredRepoLookupError:
500 500 raise
501 501 except LookupError:
502 502 pass
503 503
504 504 try:
505 505 r = int(changeid)
506 506 if '%d' % r != changeid:
507 507 raise ValueError
508 508 l = len(repo.changelog)
509 509 if r < 0:
510 510 r += l
511 511 if r < 0 or r >= l and r != wdirrev:
512 512 raise ValueError
513 513 self._rev = r
514 514 self._node = repo.changelog.node(r)
515 515 return
516 516 except error.FilteredIndexError:
517 517 raise
518 518 except (ValueError, OverflowError, IndexError):
519 519 pass
520 520
521 521 if len(changeid) == 40:
522 522 try:
523 523 self._node = bin(changeid)
524 524 self._rev = repo.changelog.rev(self._node)
525 525 return
526 526 except error.FilteredLookupError:
527 527 raise
528 528 except (TypeError, LookupError):
529 529 pass
530 530
531 531 # lookup bookmarks through the name interface
532 532 try:
533 533 self._node = repo.names.singlenode(repo, changeid)
534 534 self._rev = repo.changelog.rev(self._node)
535 535 return
536 536 except KeyError:
537 537 pass
538 538 except error.FilteredRepoLookupError:
539 539 raise
540 540 except error.RepoLookupError:
541 541 pass
542 542
543 543 self._node = repo.unfiltered().changelog._partialmatch(changeid)
544 544 if self._node is not None:
545 545 self._rev = repo.changelog.rev(self._node)
546 546 return
547 547
548 548 # lookup failed
549 549 # check if it might have come from damaged dirstate
550 550 #
551 551 # XXX we could avoid the unfiltered if we had a recognizable
552 552 # exception for filtered changeset access
553 553 if (repo.local()
554 554 and changeid in repo.unfiltered().dirstate.parents()):
555 555 msg = _("working directory has unknown parent '%s'!")
556 556 raise error.Abort(msg % short(changeid))
557 557 try:
558 558 if len(changeid) == 20 and nonascii(changeid):
559 559 changeid = hex(changeid)
560 560 except TypeError:
561 561 pass
562 562 except (error.FilteredIndexError, error.FilteredLookupError,
563 563 error.FilteredRepoLookupError):
564 564 raise _filterederror(repo, changeid)
565 565 except IndexError:
566 566 pass
567 567 raise error.RepoLookupError(
568 568 _("unknown revision '%s'") % changeid)
569 569
570 570 def __hash__(self):
571 571 try:
572 572 return hash(self._rev)
573 573 except AttributeError:
574 574 return id(self)
575 575
576 576 def __nonzero__(self):
577 577 return self._rev != nullrev
578 578
579 579 __bool__ = __nonzero__
580 580
581 581 @propertycache
582 582 def _changeset(self):
583 583 return self._repo.changelog.changelogrevision(self.rev())
584 584
585 585 @propertycache
586 586 def _manifest(self):
587 587 return self._manifestctx.read()
588 588
589 589 @property
590 590 def _manifestctx(self):
591 591 return self._repo.manifestlog[self._changeset.manifest]
592 592
593 593 @propertycache
594 594 def _manifestdelta(self):
595 595 return self._manifestctx.readdelta()
596 596
597 597 @propertycache
598 598 def _parents(self):
599 599 repo = self._repo
600 600 p1, p2 = repo.changelog.parentrevs(self._rev)
601 601 if p2 == nullrev:
602 602 return [changectx(repo, p1)]
603 603 return [changectx(repo, p1), changectx(repo, p2)]
604 604
605 605 def changeset(self):
606 606 c = self._changeset
607 607 return (
608 608 c.manifest,
609 609 c.user,
610 610 c.date,
611 611 c.files,
612 612 c.description,
613 613 c.extra,
614 614 )
615 615 def manifestnode(self):
616 616 return self._changeset.manifest
617 617
618 618 def user(self):
619 619 return self._changeset.user
620 620 def date(self):
621 621 return self._changeset.date
622 622 def files(self):
623 623 return self._changeset.files
624 624 def description(self):
625 625 return self._changeset.description
626 626 def branch(self):
627 627 return encoding.tolocal(self._changeset.extra.get("branch"))
628 628 def closesbranch(self):
629 629 return 'close' in self._changeset.extra
630 630 def extra(self):
631 631 """Return a dict of extra information."""
632 632 return self._changeset.extra
633 633 def tags(self):
634 634 """Return a list of byte tag names"""
635 635 return self._repo.nodetags(self._node)
636 636 def bookmarks(self):
637 637 """Return a list of byte bookmark names."""
638 638 return self._repo.nodebookmarks(self._node)
639 639 def phase(self):
640 640 return self._repo._phasecache.phase(self._repo, self._rev)
641 641 def hidden(self):
642 642 return self._rev in repoview.filterrevs(self._repo, 'visible')
643 643
644 644 def isinmemory(self):
645 645 return False
646 646
647 647 def children(self):
648 648 """return list of changectx contexts for each child changeset.
649 649
650 650 This returns only the immediate child changesets. Use descendants() to
651 651 recursively walk children.
652 652 """
653 653 c = self._repo.changelog.children(self._node)
654 654 return [changectx(self._repo, x) for x in c]
655 655
656 656 def ancestors(self):
657 657 for a in self._repo.changelog.ancestors([self._rev]):
658 658 yield changectx(self._repo, a)
659 659
660 660 def descendants(self):
661 661 """Recursively yield all children of the changeset.
662 662
663 663 For just the immediate children, use children()
664 664 """
665 665 for d in self._repo.changelog.descendants([self._rev]):
666 666 yield changectx(self._repo, d)
667 667
668 668 def filectx(self, path, fileid=None, filelog=None):
669 669 """get a file context from this changeset"""
670 670 if fileid is None:
671 671 fileid = self.filenode(path)
672 672 return filectx(self._repo, path, fileid=fileid,
673 673 changectx=self, filelog=filelog)
674 674
675 675 def ancestor(self, c2, warn=False):
676 676 """return the "best" ancestor context of self and c2
677 677
678 678 If there are multiple candidates, it will show a message and check
679 679 merge.preferancestor configuration before falling back to the
680 680 revlog ancestor."""
681 681 # deal with workingctxs
682 682 n2 = c2._node
683 683 if n2 is None:
684 684 n2 = c2._parents[0]._node
685 685 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
686 686 if not cahs:
687 687 anc = nullid
688 688 elif len(cahs) == 1:
689 689 anc = cahs[0]
690 690 else:
691 691 # experimental config: merge.preferancestor
692 692 for r in self._repo.ui.configlist('merge', 'preferancestor'):
693 693 try:
694 694 ctx = changectx(self._repo, r)
695 695 except error.RepoLookupError:
696 696 continue
697 697 anc = ctx.node()
698 698 if anc in cahs:
699 699 break
700 700 else:
701 701 anc = self._repo.changelog.ancestor(self._node, n2)
702 702 if warn:
703 703 self._repo.ui.status(
704 704 (_("note: using %s as ancestor of %s and %s\n") %
705 705 (short(anc), short(self._node), short(n2))) +
706 706 ''.join(_(" alternatively, use --config "
707 707 "merge.preferancestor=%s\n") %
708 708 short(n) for n in sorted(cahs) if n != anc))
709 709 return changectx(self._repo, anc)
710 710
711 711 def descendant(self, other):
712 712 """True if other is descendant of this changeset"""
713 713 return self._repo.changelog.descendant(self._rev, other._rev)
714 714
715 715 def walk(self, match):
716 716 '''Generates matching file names.'''
717 717
718 718 # Wrap match.bad method to have message with nodeid
719 719 def bad(fn, msg):
720 720 # The manifest doesn't know about subrepos, so don't complain about
721 721 # paths into valid subrepos.
722 722 if any(fn == s or fn.startswith(s + '/')
723 723 for s in self.substate):
724 724 return
725 725 match.bad(fn, _('no such file in rev %s') % self)
726 726
727 727 m = matchmod.badmatch(match, bad)
728 728 return self._manifest.walk(m)
729 729
730 730 def matches(self, match):
731 731 return self.walk(match)
732 732
733 733 class basefilectx(object):
734 734 """A filecontext object represents the common logic for its children:
735 735 filectx: read-only access to a filerevision that is already present
736 736 in the repo,
737 737 workingfilectx: a filecontext that represents files from the working
738 738 directory,
739 739 memfilectx: a filecontext that represents files in-memory,
740 740 overlayfilectx: duplicate another filecontext with some fields overridden.
741 741 """
742 742 @propertycache
743 743 def _filelog(self):
744 744 return self._repo.file(self._path)
745 745
746 746 @propertycache
747 747 def _changeid(self):
748 748 if r'_changeid' in self.__dict__:
749 749 return self._changeid
750 750 elif r'_changectx' in self.__dict__:
751 751 return self._changectx.rev()
752 752 elif r'_descendantrev' in self.__dict__:
753 753 # this file context was created from a revision with a known
754 754 # descendant, we can (lazily) correct for linkrev aliases
755 755 return self._adjustlinkrev(self._descendantrev)
756 756 else:
757 757 return self._filelog.linkrev(self._filerev)
758 758
759 759 @propertycache
760 760 def _filenode(self):
761 761 if r'_fileid' in self.__dict__:
762 762 return self._filelog.lookup(self._fileid)
763 763 else:
764 764 return self._changectx.filenode(self._path)
765 765
766 766 @propertycache
767 767 def _filerev(self):
768 768 return self._filelog.rev(self._filenode)
769 769
770 770 @propertycache
771 771 def _repopath(self):
772 772 return self._path
773 773
774 774 def __nonzero__(self):
775 775 try:
776 776 self._filenode
777 777 return True
778 778 except error.LookupError:
779 779 # file is missing
780 780 return False
781 781
782 782 __bool__ = __nonzero__
783 783
784 784 def __bytes__(self):
785 785 try:
786 786 return "%s@%s" % (self.path(), self._changectx)
787 787 except error.LookupError:
788 788 return "%s@???" % self.path()
789 789
790 790 __str__ = encoding.strmethod(__bytes__)
791 791
792 792 def __repr__(self):
793 793 return "<%s %s>" % (type(self).__name__, str(self))
794 794
795 795 def __hash__(self):
796 796 try:
797 797 return hash((self._path, self._filenode))
798 798 except AttributeError:
799 799 return id(self)
800 800
801 801 def __eq__(self, other):
802 802 try:
803 803 return (type(self) == type(other) and self._path == other._path
804 804 and self._filenode == other._filenode)
805 805 except AttributeError:
806 806 return False
807 807
808 808 def __ne__(self, other):
809 809 return not (self == other)
810 810
811 811 def filerev(self):
812 812 return self._filerev
813 813 def filenode(self):
814 814 return self._filenode
815 815 @propertycache
816 816 def _flags(self):
817 817 return self._changectx.flags(self._path)
818 818 def flags(self):
819 819 return self._flags
820 820 def filelog(self):
821 821 return self._filelog
822 822 def rev(self):
823 823 return self._changeid
824 824 def linkrev(self):
825 825 return self._filelog.linkrev(self._filerev)
826 826 def node(self):
827 827 return self._changectx.node()
828 828 def hex(self):
829 829 return self._changectx.hex()
830 830 def user(self):
831 831 return self._changectx.user()
832 832 def date(self):
833 833 return self._changectx.date()
834 834 def files(self):
835 835 return self._changectx.files()
836 836 def description(self):
837 837 return self._changectx.description()
838 838 def branch(self):
839 839 return self._changectx.branch()
840 840 def extra(self):
841 841 return self._changectx.extra()
842 842 def phase(self):
843 843 return self._changectx.phase()
844 844 def phasestr(self):
845 845 return self._changectx.phasestr()
846 846 def obsolete(self):
847 847 return self._changectx.obsolete()
848 848 def instabilities(self):
849 849 return self._changectx.instabilities()
850 850 def manifest(self):
851 851 return self._changectx.manifest()
852 852 def changectx(self):
853 853 return self._changectx
854 854 def renamed(self):
855 855 return self._copied
856 856 def repo(self):
857 857 return self._repo
858 858 def size(self):
859 859 return len(self.data())
860 860
861 861 def path(self):
862 862 return self._path
863 863
864 864 def isbinary(self):
865 865 try:
866 866 return util.binary(self.data())
867 867 except IOError:
868 868 return False
869 869 def isexec(self):
870 870 return 'x' in self.flags()
871 871 def islink(self):
872 872 return 'l' in self.flags()
873 873
874 874 def isabsent(self):
875 875 """whether this filectx represents a file not in self._changectx
876 876
877 877 This is mainly for merge code to detect change/delete conflicts. This is
878 878 expected to be True for all subclasses of basectx."""
879 879 return False
880 880
881 881 _customcmp = False
882 882 def cmp(self, fctx):
883 883 """compare with other file context
884 884
885 885 returns True if different than fctx.
886 886 """
887 887 if fctx._customcmp:
888 888 return fctx.cmp(self)
889 889
890 890 if (fctx._filenode is None
891 891 and (self._repo._encodefilterpats
892 892 # if file data starts with '\1\n', empty metadata block is
893 893 # prepended, which adds 4 bytes to filelog.size().
894 894 or self.size() - 4 == fctx.size())
895 895 or self.size() == fctx.size()):
896 896 return self._filelog.cmp(self._filenode, fctx.data())
897 897
898 898 return True
899 899
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        # use the unfiltered changelog: the linkrev may point at a revision
        # that is hidden in the filtered view
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            # walk the ancestors until we find a changeset that touches this
            # path with the exact file node we are looking for
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if a manifest uses a buggy file revision (not a child of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
945 945
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        # a filectx without an associated changeset cannot be adjusted; fall
        # back to the raw linkrev in that case (and when they already agree)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)
961 961
962 962 def introfilectx(self):
963 963 """Return filectx having identical contents, but pointing to the
964 964 changeset revision where this filectx was introduced"""
965 965 introrev = self.introrev()
966 966 if self.rev() == introrev:
967 967 return self
968 968 return self.filectx(self.filenode(), changeid=introrev)
969 969
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
985 985
986 986 def parents(self):
987 987 _path = self._path
988 988 fl = self._filelog
989 989 parents = self._filelog.parents(self._filenode)
990 990 pl = [(_path, node, fl) for node in parents if node != nullid]
991 991
992 992 r = fl.renamed(self._filenode)
993 993 if r:
994 994 # - In the simple rename case, both parent are nullid, pl is empty.
995 995 # - In case of merge, only one of the parent is null id and should
996 996 # be replaced with the rename information. This parent is -always-
997 997 # the first one.
998 998 #
999 999 # As null id have always been filtered out in the previous list
1000 1000 # comprehension, inserting to 0 will always result in "replacing
1001 1001 # first nullid parent with rename information.
1002 1002 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1003 1003
1004 1004 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1005 1005
1006 1006 def p1(self):
1007 1007 return self.parents()[0]
1008 1008
1009 1009 def p2(self):
1010 1010 p = self.parents()
1011 1011 if len(p) == 2:
1012 1012 return p[1]
1013 1013 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1014 1014
1015 1015 def annotate(self, follow=False, linenumber=False, skiprevs=None,
1016 1016 diffopts=None):
1017 1017 '''returns a list of tuples of ((ctx, number), line) for each line
1018 1018 in the file, where ctx is the filectx of the node where
1019 1019 that line was last changed; if linenumber parameter is true, number is
1020 1020 the line number at the first appearance in the managed file, otherwise,
1021 1021 number has a fixed value of False.
1022 1022 '''
1023 1023
1024 1024 def lines(text):
1025 1025 if text.endswith("\n"):
1026 1026 return text.count("\n")
1027 1027 return text.count("\n") + int(bool(text))
1028 1028
1029 1029 if linenumber:
1030 1030 def decorate(text, rev):
1031 1031 return ([annotateline(fctx=rev, lineno=i)
1032 1032 for i in xrange(1, lines(text) + 1)], text)
1033 1033 else:
1034 1034 def decorate(text, rev):
1035 1035 return ([annotateline(fctx=rev)] * lines(text), text)
1036 1036
1037 1037 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1038 1038
1039 1039 def parents(f):
1040 1040 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1041 1041 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1042 1042 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1043 1043 # isn't an ancestor of the srcrev.
1044 1044 f._changeid
1045 1045 pl = f.parents()
1046 1046
1047 1047 # Don't return renamed parents if we aren't following.
1048 1048 if not follow:
1049 1049 pl = [p for p in pl if p.path() == f.path()]
1050 1050
1051 1051 # renamed filectx won't have a filelog yet, so set it
1052 1052 # from the cache to save time
1053 1053 for p in pl:
1054 1054 if not '_filelog' in p.__dict__:
1055 1055 p._filelog = getlog(p.path())
1056 1056
1057 1057 return pl
1058 1058
1059 1059 # use linkrev to find the first changeset where self appeared
1060 1060 base = self.introfilectx()
1061 1061 if getattr(base, '_ancestrycontext', None) is None:
1062 1062 cl = self._repo.changelog
1063 1063 if base.rev() is None:
1064 1064 # wctx is not inclusive, but works because _ancestrycontext
1065 1065 # is used to test filelog revisions
1066 1066 ac = cl.ancestors([p.rev() for p in base.parents()],
1067 1067 inclusive=True)
1068 1068 else:
1069 1069 ac = cl.ancestors([base.rev()], inclusive=True)
1070 1070 base._ancestrycontext = ac
1071 1071
1072 1072 # This algorithm would prefer to be recursive, but Python is a
1073 1073 # bit recursion-hostile. Instead we do an iterative
1074 1074 # depth-first search.
1075 1075
1076 1076 # 1st DFS pre-calculates pcache and needed
1077 1077 visit = [base]
1078 1078 pcache = {}
1079 1079 needed = {base: 1}
1080 1080 while visit:
1081 1081 f = visit.pop()
1082 1082 if f in pcache:
1083 1083 continue
1084 1084 pl = parents(f)
1085 1085 pcache[f] = pl
1086 1086 for p in pl:
1087 1087 needed[p] = needed.get(p, 0) + 1
1088 1088 if p not in pcache:
1089 1089 visit.append(p)
1090 1090
1091 1091 # 2nd DFS does the actual annotate
1092 1092 visit[:] = [base]
1093 1093 hist = {}
1094 1094 while visit:
1095 1095 f = visit[-1]
1096 1096 if f in hist:
1097 1097 visit.pop()
1098 1098 continue
1099 1099
1100 1100 ready = True
1101 1101 pl = pcache[f]
1102 1102 for p in pl:
1103 1103 if p not in hist:
1104 1104 ready = False
1105 1105 visit.append(p)
1106 1106 if ready:
1107 1107 visit.pop()
1108 1108 curr = decorate(f.data(), f)
1109 1109 skipchild = False
1110 1110 if skiprevs is not None:
1111 1111 skipchild = f._changeid in skiprevs
1112 1112 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1113 1113 diffopts)
1114 1114 for p in pl:
1115 1115 if needed[p] == 1:
1116 1116 del hist[p]
1117 1117 del needed[p]
1118 1118 else:
1119 1119 needed[p] -= 1
1120 1120
1121 1121 hist[f] = curr
1122 1122 del pcache[f]
1123 1123
1124 return pycompat.ziplist(hist[base][0], hist[base][1].splitlines(True))
1124 lineattrs, text = hist[base]
1125 return pycompat.ziplist(lineattrs, mdiff.splitnewlines(text))
1125 1126
1126 1127 def ancestors(self, followfirst=False):
1127 1128 visit = {}
1128 1129 c = self
1129 1130 if followfirst:
1130 1131 cut = 1
1131 1132 else:
1132 1133 cut = None
1133 1134
1134 1135 while True:
1135 1136 for parent in c.parents()[:cut]:
1136 1137 visit[(parent.linkrev(), parent.filenode())] = parent
1137 1138 if not visit:
1138 1139 break
1139 1140 c = visit.pop(max(visit))
1140 1141 yield c
1141 1142
1142 1143 def decodeddata(self):
1143 1144 """Returns `data()` after running repository decoding filters.
1144 1145
1145 1146 This is often equivalent to how the data would be expressed on disk.
1146 1147 """
1147 1148 return self._repo.wwritedata(self.path(), self.data())
1148 1149
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """Immutable per-line annotation record produced by annotate()."""
    # filectx of the revision that last touched this line
    fctx = attr.ib()
    # line number at first appearance in the file, or False when line
    # numbering was not requested
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
1155 1156
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.
    '''
    # each entry pairs a parent's (annotations, text) with the diff blocks
    # between that parent's text and the child's text
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        # This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        # only steal lines still blamed on the child
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak],
                                                       skip=True)
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
1216 1217
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of resolving the revision must be provided
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better than "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving the linkrev issues are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # raw revlog data, with flags-processing (e.g. LFS) left unapplied
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        # if either changeset parent already carries this exact file node,
        # the rename did not happen in this changeset
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1322 1323
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # "+" marks an uncommitted (dirty) context
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # 'devel.default-date' lets tests pin the commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context inherits the bookmarks of its parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer a manifest already built from status, if available
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1523 1524
1524 1525 class workingctx(committablectx):
1525 1526 """A workingctx object makes access to data related to
1526 1527 the current working directory convenient.
1527 1528 date - any valid date string or (unixtime, offset), or None.
1528 1529 user - username string, or None.
1529 1530 extra - a dictionary of extra values, or None.
1530 1531 changes - a list of file lists as returned by localrepo.status()
1531 1532 or None to use the repository status.
1532 1533 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # iterate tracked files, skipping entries marked removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # a file is in the working context unless it is untracked ('?') or
        # marked removed ('r')
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        # the working directory has the fixed pseudo-node 'wdirid'
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        # drop the second parent when it is null (non-merge state)
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1560 1561
    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1572 1573
    def add(self, list, prefix=""):
        """Schedule the given files for addition; return rejected names."""
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn, but still add: large files are expensive to diff
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, merged or normal (tracked)
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1607 1608
    def forget(self, files, prefix=""):
        """Stop tracking the given files; return rejected names."""
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    # tracked and not freshly added: mark as removed
                    self._repo.dirstate.remove(f)
                else:
                    # freshly added: just drop the pending addition
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        """Restore removed files from a parent revision's contents."""
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    # take the file from whichever parent still has it
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
1635 1636
    def copy(self, source, dest):
        """Record that 'dest' was copied from 'source' in the dirstate."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher for the working directory."""
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)
1668 1669
    def _filtersuspectsymlink(self, files):
        """Drop files flagged as symlinks whose contents don't look like
        symlink targets (placeholder corruption on link-less filesystems)."""
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        """Compare possibly-clean files against the parent.

        Returns (modified, deleted, fixup) lists, where 'fixup' names files
        that proved clean and can be marked normal in the dirstate.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1717 1718
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1757 1758
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1788 1789
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        # added/modified files get sentinel nodeids so manifest comparison
        # sees them as different from the parent
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man
1821 1822
1822 1823 def _buildstatus(self, other, s, match, listignored, listclean,
1823 1824 listunknown):
1824 1825 """build a status with respect to another context
1825 1826
1826 1827 This includes logic for maintaining the fast path of status when
1827 1828 comparing the working directory against its parent, which is to skip
1828 1829 building a new manifest if self (working directory) is not comparing
1829 1830 against its parent (repo['.']).
1830 1831 """
1831 1832 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1832 1833 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1833 1834 # might have accidentally ended up with the entire contents of the file
1834 1835 # they are supposed to be linking to.
1835 1836 s.modified[:] = self._filtersuspectsymlink(s.modified)
1836 1837 if other != self._repo['.']:
1837 1838 s = super(workingctx, self)._buildstatus(other, s, match,
1838 1839 listignored, listclean,
1839 1840 listunknown)
1840 1841 return s
1841 1842
1842 1843 def _matchstatus(self, other, match):
1843 1844 """override the match method with a filter for directory patterns
1844 1845
1845 1846 We use inheritance to customize the match.bad method only in cases of
1846 1847 workingctx since it belongs only to the working directory when
1847 1848 comparing against the parent changeset.
1848 1849
1849 1850 If we aren't comparing against the working directory's parent, then we
1850 1851 just use the default match object sent to us.
1851 1852 """
1852 1853 if other != self._repo['.']:
1853 1854 def bad(f, msg):
1854 1855 # 'f' may be a directory pattern from 'match.files()',
1855 1856 # so 'f not in ctx1' is not enough
1856 1857 if f not in other and not other.hasdir(f):
1857 1858 self._repo.ui.warn('%s: %s\n' %
1858 1859 (self._repo.dirstate.pathto(f), msg))
1859 1860 match.bad = bad
1860 1861 return match
1861 1862
    def markcommitted(self, node):
        """Perform working-directory bookkeeping after committing ``node``.

        Delegates the usual post-commit cleanup to the superclass, then
        lets the sparse machinery refresh its state for the new changeset.
        """
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1866 1867
class committablefilectx(basefilectx):
    """Shared behavior for file contexts that can be committed, e.g.
    workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # only cache what the caller actually supplied; subclasses fill in
        # the rest lazily (e.g. via propertycache)
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents

        def nodefor(ctx):
            return ctx._manifest.get(path, nullid)

        renamed = self.renamed()
        if renamed:
            entries = [renamed + (None,)]
        else:
            entries = [(path, nodefor(parentctxs[0]), filelog)]
        entries.extend((path, nodefor(pctx), filelog)
                       for pctx in parentctxs[1:])

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in entries if n != nullid]

    def children(self):
        return []
1913 1914
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # default to a fresh workingctx when no owning ctx was supplied
        return workingctx(self._repo)

    def data(self):
        # file content as read from the working directory (filters applied)
        return self._repo.wread(self._path)
    def renamed(self):
        # (source path, source filenode) if dirstate records a copy,
        # else None
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        # mtime of the on-disk file; fall back to the changeset date when
        # the file is gone (ENOENT), preserving the changeset's timezone
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # like exists() but does not follow symlinks
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only meaningful for tracked states (normal/merged/added)
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        # a directory in the way of the file is removed wholesale...
        if wvfs.isdir(f) and not wvfs.islink(f):
            wvfs.rmtree(f, forcibly=True)
        # ...and any file sitting where a parent directory must go is
        # unlinked (innermost first; one hit is enough)
        for p in reversed(list(util.finddirs(f))):
            if wvfs.isfileorlink(p):
                wvfs.unlink(p)
                break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1992 1993
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        # initializes self._cache
        self.clean()

    def setbase(self, wrappedctx):
        """(Re)target this overlay at ``wrappedctx``."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        """Return file content, preferring dirty (cached) data."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data'] is not None:
                    # Compare against None rather than truthiness: the
                    # empty string is valid cached content (written via
                    # write()) and must not fall through to the wrapped
                    # context.
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        """a manifest of the wrapped ctx, patched with the cached changes"""
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # dirty, existing, and present in the wrapped ctx
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        # dirty, existing, but absent from the wrapped ctx
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        # dirty, deleted, and present in the wrapped ctx
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # Use the requested path: this class has no self._path
                # attribute (that belongs to filectx classes), so the old
                # "self._path" here raised AttributeError instead of the
                # intended ProgrammingError.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1():
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        if len(matches) > 0:
            if len(matches) == 1 and matches.keys()[0] == path:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(matches),
                                 ', '.join(matches.keys())))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=util.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=util.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # Use the requested path: this class has no self._path
                # attribute, so the old "self._path" here raised
                # AttributeError instead of the intended ProgrammingError.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                            underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2280 2281
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # the owning overlayworkingctx; nearly every method below delegates
        # to it, keyed by self._path
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True if contents differ
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        # (source path, source filenode) or None, from the overlay's
        # copy metadata
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # no filesystem to audit for in-memory files
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # nothing on disk can conflict with an in-memory write
        pass
2339 2340
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # deliberately skip workingctx.__init__ and jump straight to
        # committablectx, so 'changes' seeds the status instead of being
        # recomputed from the dirstate
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            cleanfiles = [f for f in self._manifest
                          if f not in self._changedset]
        else:
            cleanfiles = []
        st = self._status
        return scmutil.status([f for f in st.modified if match(f)],
                              [f for f in st.added if match(f)],
                              [f for f in st.removed if match(f)],
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added, self._status.removed)
        return changed
2375 2376
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            # first request for this path; compute and remember it
            fctx = cache[path] = func(repo, memctx, path)
            return fctx

    return getfilectx
2391 2392
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        renamed = fctx.renamed()
        copied = renamed[0] if renamed else renamed
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx
2410 2411
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # deleted file: memctx registers the removal when the
            # callback returns None
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx
2425 2426
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # substitute nullid for any missing parent
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            # let the editor callback rewrite the message, and persist it
            # so an aborted commit can be recovered
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # new files have no filelog parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a filectx: the file exists -> modified
                modified.append(f)
            else:
                # filectxfn returned None: the file is being removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2548 2549
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        self._flags = ('l' if islink else '') + ('x' if isexec else '')
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2581 2582
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # no override: any ctx trivially "matches" the original
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        # lazily produced content (either overridden or the original's)
        return self._datafunc()
2652 2653
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()

        # resolve the requested parents (or inherit them), then pad the
        # list with null changectxs so it always holds exactly two entries
        if parents is None:
            pctxs = list(originalctx.parents())
        else:
            pctxs = [repo[p] for p in parents if p is not None]
        while len(pctxs) < 2:
            pctxs.append(repo[nullid])
        self._parents = pctxs
        p1, p2 = pctxs

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError("can't reuse the manifest: "
                               "its p1 doesn't match the new ctx p1")
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError("can't reuse the manifest: "
                               "its p2 doesn't match the new ctx p2")

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node of the manifest borrowed from the original ctx."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        """Delegate file lookups to the revision whose manifest we reuse."""
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking existence of
        # the 2nd parent: _parents was explicitly padded to length 2 above,
        # so test the node instead.
        if p2.node() == nullid:
            managing = lambda f: f in man1
        else:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2

        modified, added, removed = [], [], []
        for fname in self._files:
            if not managing(fname):
                added.append(fname)
            elif fname in self:
                modified.append(fname)
            else:
                removed.append(fname)

        return scmutil.status(modified, added, removed, [], [], [], [])
2750 2751
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no exec/symlink flag information here
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        """Write raw file content ``data`` to ``self._path``.

        Flags are unsupported for arbitrary files, hence the assertion.
        The file is opened in binary mode ("wb", not "w"): data() and
        decodeddata() read the file back in binary, so writing through a
        text-mode handle would corrupt content via newline translation
        (notably CRLF on Windows) and break the round-trip.
        """
        assert not flags
        with open(self._path, "wb") as f:
            f.write(data)
@@ -1,1001 +1,1033 b''
1 1 $ HGMERGE=true; export HGMERGE
2 2
3 3 init
4 4
5 5 $ hg init repo
6 6 $ cd repo
7 7
8 8 commit
9 9
10 10 $ echo 'a' > a
11 11 $ hg ci -A -m test -u nobody -d '1 0'
12 12 adding a
13 13
14 14 annotate -c
15 15
16 16 $ hg annotate -c a
17 17 8435f90966e4: a
18 18
19 19 annotate -cl
20 20
21 21 $ hg annotate -cl a
22 22 8435f90966e4:1: a
23 23
24 24 annotate -d
25 25
26 26 $ hg annotate -d a
27 27 Thu Jan 01 00:00:01 1970 +0000: a
28 28
29 29 annotate -n
30 30
31 31 $ hg annotate -n a
32 32 0: a
33 33
34 34 annotate -nl
35 35
36 36 $ hg annotate -nl a
37 37 0:1: a
38 38
39 39 annotate -u
40 40
41 41 $ hg annotate -u a
42 42 nobody: a
43 43
44 44 annotate -cdnu
45 45
46 46 $ hg annotate -cdnu a
47 47 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
48 48
49 49 annotate -cdnul
50 50
51 51 $ hg annotate -cdnul a
52 52 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
53 53
54 54 annotate (JSON)
55 55
56 56 $ hg annotate -Tjson a
57 57 [
58 58 {
59 59 "abspath": "a",
60 60 "lines": [{"line": "a\n", "rev": 0}],
61 61 "path": "a"
62 62 }
63 63 ]
64 64
65 65 $ hg annotate -Tjson -cdfnul a
66 66 [
67 67 {
68 68 "abspath": "a",
69 69 "lines": [{"date": [1.0, 0], "file": "a", "line": "a\n", "line_number": 1, "node": "8435f90966e442695d2ded29fdade2bac5ad8065", "rev": 0, "user": "nobody"}],
70 70 "path": "a"
71 71 }
72 72 ]
73 73
74 74 $ cat <<EOF >>a
75 75 > a
76 76 > a
77 77 > EOF
78 78 $ hg ci -ma1 -d '1 0'
79 79 $ hg cp a b
80 80 $ hg ci -mb -d '1 0'
81 81 $ cat <<EOF >> b
82 82 > b4
83 83 > b5
84 84 > b6
85 85 > EOF
86 86 $ hg ci -mb2 -d '2 0'
87 87
88 88 annotate multiple files (JSON)
89 89
90 90 $ hg annotate -Tjson a b
91 91 [
92 92 {
93 93 "abspath": "a",
94 94 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}],
95 95 "path": "a"
96 96 },
97 97 {
98 98 "abspath": "b",
99 99 "lines": [{"line": "a\n", "rev": 0}, {"line": "a\n", "rev": 1}, {"line": "a\n", "rev": 1}, {"line": "b4\n", "rev": 3}, {"line": "b5\n", "rev": 3}, {"line": "b6\n", "rev": 3}],
100 100 "path": "b"
101 101 }
102 102 ]
103 103
104 104 annotate multiple files (template)
105 105
106 106 $ hg annotate -T'== {abspath} ==\n{lines % "{rev}: {line}"}' a b
107 107 == a ==
108 108 0: a
109 109 1: a
110 110 1: a
111 111 == b ==
112 112 0: a
113 113 1: a
114 114 1: a
115 115 3: b4
116 116 3: b5
117 117 3: b6
118 118
119 119 annotate -n b
120 120
121 121 $ hg annotate -n b
122 122 0: a
123 123 1: a
124 124 1: a
125 125 3: b4
126 126 3: b5
127 127 3: b6
128 128
129 129 annotate --no-follow b
130 130
131 131 $ hg annotate --no-follow b
132 132 2: a
133 133 2: a
134 134 2: a
135 135 3: b4
136 136 3: b5
137 137 3: b6
138 138
139 139 annotate -nl b
140 140
141 141 $ hg annotate -nl b
142 142 0:1: a
143 143 1:2: a
144 144 1:3: a
145 145 3:4: b4
146 146 3:5: b5
147 147 3:6: b6
148 148
149 149 annotate -nf b
150 150
151 151 $ hg annotate -nf b
152 152 0 a: a
153 153 1 a: a
154 154 1 a: a
155 155 3 b: b4
156 156 3 b: b5
157 157 3 b: b6
158 158
159 159 annotate -nlf b
160 160
161 161 $ hg annotate -nlf b
162 162 0 a:1: a
163 163 1 a:2: a
164 164 1 a:3: a
165 165 3 b:4: b4
166 166 3 b:5: b5
167 167 3 b:6: b6
168 168
169 169 $ hg up -C 2
170 170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
171 171 $ cat <<EOF >> b
172 172 > b4
173 173 > c
174 174 > b5
175 175 > EOF
176 176 $ hg ci -mb2.1 -d '2 0'
177 177 created new head
178 178 $ hg merge
179 179 merging b
180 180 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
181 181 (branch merge, don't forget to commit)
182 182 $ hg ci -mmergeb -d '3 0'
183 183
184 184 annotate after merge
185 185
186 186 $ hg annotate -nf b
187 187 0 a: a
188 188 1 a: a
189 189 1 a: a
190 190 3 b: b4
191 191 4 b: c
192 192 3 b: b5
193 193
194 194 annotate after merge with -l
195 195
196 196 $ hg annotate -nlf b
197 197 0 a:1: a
198 198 1 a:2: a
199 199 1 a:3: a
200 200 3 b:4: b4
201 201 4 b:5: c
202 202 3 b:5: b5
203 203
204 204 $ hg up -C 1
205 205 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
206 206 $ hg cp a b
207 207 $ cat <<EOF > b
208 208 > a
209 209 > z
210 210 > a
211 211 > EOF
212 212 $ hg ci -mc -d '3 0'
213 213 created new head
214 214 $ hg merge
215 215 merging b
216 216 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
217 217 (branch merge, don't forget to commit)
218 218 $ cat <<EOF >> b
219 219 > b4
220 220 > c
221 221 > b5
222 222 > EOF
223 223 $ echo d >> b
224 224 $ hg ci -mmerge2 -d '4 0'
225 225
226 226 annotate after rename merge
227 227
228 228 $ hg annotate -nf b
229 229 0 a: a
230 230 6 b: z
231 231 1 a: a
232 232 3 b: b4
233 233 4 b: c
234 234 3 b: b5
235 235 7 b: d
236 236
237 237 annotate after rename merge with -l
238 238
239 239 $ hg annotate -nlf b
240 240 0 a:1: a
241 241 6 b:2: z
242 242 1 a:3: a
243 243 3 b:4: b4
244 244 4 b:5: c
245 245 3 b:5: b5
246 246 7 b:7: d
247 247
248 248 --skip nothing (should be the same as no --skip at all)
249 249
250 250 $ hg annotate -nlf b --skip '1::0'
251 251 0 a:1: a
252 252 6 b:2: z
253 253 1 a:3: a
254 254 3 b:4: b4
255 255 4 b:5: c
256 256 3 b:5: b5
257 257 7 b:7: d
258 258
259 259 --skip a modified line. Note a slight behavior difference in pure - this is
260 260 because the pure code comes up with slightly different deltas internally.
261 261
262 262 $ hg annotate -nlf b --skip 6
263 263 0 a:1: a
264 264 1 a:2* z (no-pure !)
265 265 0 a:1* z (pure !)
266 266 1 a:3: a
267 267 3 b:4: b4
268 268 4 b:5: c
269 269 3 b:5: b5
270 270 7 b:7: d
271 271
272 272 --skip added lines (and test multiple skip)
273 273
274 274 $ hg annotate -nlf b --skip 3
275 275 0 a:1: a
276 276 6 b:2: z
277 277 1 a:3: a
278 278 1 a:3* b4
279 279 4 b:5: c
280 280 1 a:3* b5
281 281 7 b:7: d
282 282
283 283 $ hg annotate -nlf b --skip 4
284 284 0 a:1: a
285 285 6 b:2: z
286 286 1 a:3: a
287 287 3 b:4: b4
288 288 1 a:3* c
289 289 3 b:5: b5
290 290 7 b:7: d
291 291
292 292 $ hg annotate -nlf b --skip 3 --skip 4
293 293 0 a:1: a
294 294 6 b:2: z
295 295 1 a:3: a
296 296 1 a:3* b4
297 297 1 a:3* c
298 298 1 a:3* b5
299 299 7 b:7: d
300 300
301 301 $ hg annotate -nlf b --skip 'merge()'
302 302 0 a:1: a
303 303 6 b:2: z
304 304 1 a:3: a
305 305 3 b:4: b4
306 306 4 b:5: c
307 307 3 b:5: b5
308 308 3 b:5* d
309 309
310 310 --skip everything -- use the revision the file was introduced in
311 311
312 312 $ hg annotate -nlf b --skip 'all()'
313 313 0 a:1: a
314 314 0 a:1* z
315 315 0 a:1* a
316 316 0 a:1* b4
317 317 0 a:1* c
318 318 0 a:1* b5
319 319 0 a:1* d
320 320
321 321 Issue2807: alignment of line numbers with -l
322 322
323 323 $ echo more >> b
324 324 $ hg ci -mmore -d '5 0'
325 325 $ echo more >> b
326 326 $ hg ci -mmore -d '6 0'
327 327 $ echo more >> b
328 328 $ hg ci -mmore -d '7 0'
329 329 $ hg annotate -nlf b
330 330 0 a: 1: a
331 331 6 b: 2: z
332 332 1 a: 3: a
333 333 3 b: 4: b4
334 334 4 b: 5: c
335 335 3 b: 5: b5
336 336 7 b: 7: d
337 337 8 b: 8: more
338 338 9 b: 9: more
339 339 10 b:10: more
340 340
341 341 linkrev vs rev
342 342
343 343 $ hg annotate -r tip -n a
344 344 0: a
345 345 1: a
346 346 1: a
347 347
348 348 linkrev vs rev with -l
349 349
350 350 $ hg annotate -r tip -nl a
351 351 0:1: a
352 352 1:2: a
353 353 1:3: a
354 354
355 355 Issue589: "undelete" sequence leads to crash
356 356
357 357 annotate was crashing when trying to --follow something
358 358
359 359 like A -> B -> A
360 360
361 361 generate ABA rename configuration
362 362
363 363 $ echo foo > foo
364 364 $ hg add foo
365 365 $ hg ci -m addfoo
366 366 $ hg rename foo bar
367 367 $ hg ci -m renamefoo
368 368 $ hg rename bar foo
369 369 $ hg ci -m renamebar
370 370
371 371 annotate after ABA with follow
372 372
373 373 $ hg annotate --follow foo
374 374 foo: foo
375 375
376 376 missing file
377 377
378 378 $ hg ann nosuchfile
379 379 abort: nosuchfile: no such file in rev e9e6b4fa872f
380 380 [255]
381 381
382 382 annotate file without '\n' on last line
383 383
384 384 $ printf "" > c
385 385 $ hg ci -A -m test -u nobody -d '1 0'
386 386 adding c
387 387 $ hg annotate c
388 388 $ printf "a\nb" > c
389 389 $ hg ci -m test
390 390 $ hg annotate c
391 391 [0-9]+: a (re)
392 392 [0-9]+: b (re)
393 393
394 394 Issue3841: check annotation of the file of which filelog includes
395 395 merging between the revision and its ancestor
396 396
397 397 to reproduce the situation with recent Mercurial, this script uses (1)
398 398 "hg debugsetparents" to merge without ancestor check by "hg merge",
399 399 and (2) the extension to allow filelog merging between the revision
400 400 and its ancestor by overriding "repo._filecommit".
401 401
402 402 $ cat > ../legacyrepo.py <<EOF
403 403 > from __future__ import absolute_import
404 404 > from mercurial import error, node
405 405 > def reposetup(ui, repo):
406 406 > class legacyrepo(repo.__class__):
407 407 > def _filecommit(self, fctx, manifest1, manifest2,
408 408 > linkrev, tr, changelist):
409 409 > fname = fctx.path()
410 410 > text = fctx.data()
411 411 > flog = self.file(fname)
412 412 > fparent1 = manifest1.get(fname, node.nullid)
413 413 > fparent2 = manifest2.get(fname, node.nullid)
414 414 > meta = {}
415 415 > copy = fctx.renamed()
416 416 > if copy and copy[0] != fname:
417 417 > raise error.Abort('copying is not supported')
418 418 > if fparent2 != node.nullid:
419 419 > changelist.append(fname)
420 420 > return flog.add(text, meta, tr, linkrev,
421 421 > fparent1, fparent2)
422 422 > raise error.Abort('only merging is supported')
423 423 > repo.__class__ = legacyrepo
424 424 > EOF
425 425
426 426 $ cat > baz <<EOF
427 427 > 1
428 428 > 2
429 429 > 3
430 430 > 4
431 431 > 5
432 432 > EOF
433 433 $ hg add baz
434 434 $ hg commit -m "baz:0"
435 435
436 436 $ cat > baz <<EOF
437 437 > 1 baz:1
438 438 > 2
439 439 > 3
440 440 > 4
441 441 > 5
442 442 > EOF
443 443 $ hg commit -m "baz:1"
444 444
445 445 $ cat > baz <<EOF
446 446 > 1 baz:1
447 447 > 2 baz:2
448 448 > 3
449 449 > 4
450 450 > 5
451 451 > EOF
452 452 $ hg debugsetparents 17 17
453 453 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
454 454 $ hg debugindexdot .hg/store/data/baz.i
455 455 digraph G {
456 456 -1 -> 0
457 457 0 -> 1
458 458 1 -> 2
459 459 1 -> 2
460 460 }
461 461 $ hg annotate baz
462 462 17: 1 baz:1
463 463 18: 2 baz:2
464 464 16: 3
465 465 16: 4
466 466 16: 5
467 467
468 468 $ cat > baz <<EOF
469 469 > 1 baz:1
470 470 > 2 baz:2
471 471 > 3 baz:3
472 472 > 4
473 473 > 5
474 474 > EOF
475 475 $ hg commit -m "baz:3"
476 476
477 477 $ cat > baz <<EOF
478 478 > 1 baz:1
479 479 > 2 baz:2
480 480 > 3 baz:3
481 481 > 4 baz:4
482 482 > 5
483 483 > EOF
484 484 $ hg debugsetparents 19 18
485 485 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
486 486 $ hg debugindexdot .hg/store/data/baz.i
487 487 digraph G {
488 488 -1 -> 0
489 489 0 -> 1
490 490 1 -> 2
491 491 1 -> 2
492 492 2 -> 3
493 493 3 -> 4
494 494 2 -> 4
495 495 }
496 496 $ hg annotate baz
497 497 17: 1 baz:1
498 498 18: 2 baz:2
499 499 19: 3 baz:3
500 500 20: 4 baz:4
501 501 16: 5
502 502
503 503 annotate clean file
504 504
505 505 $ hg annotate -ncr "wdir()" foo
506 506 11 472b18db256d : foo
507 507
508 508 annotate modified file
509 509
510 510 $ echo foofoo >> foo
511 511 $ hg annotate -r "wdir()" foo
512 512 11 : foo
513 513 20+: foofoo
514 514
515 515 $ hg annotate -cr "wdir()" foo
516 516 472b18db256d : foo
517 517 b6bedd5477e7+: foofoo
518 518
519 519 $ hg annotate -ncr "wdir()" foo
520 520 11 472b18db256d : foo
521 521 20 b6bedd5477e7+: foofoo
522 522
523 523 $ hg annotate --debug -ncr "wdir()" foo
524 524 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
525 525 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
526 526
527 527 $ hg annotate -udr "wdir()" foo
528 528 test Thu Jan 01 00:00:00 1970 +0000: foo
529 529 test [A-Za-z0-9:+ ]+: foofoo (re)
530 530
531 531 $ hg annotate -ncr "wdir()" -Tjson foo
532 532 [
533 533 {
534 534 "abspath": "foo",
535 535 "lines": [{"line": "foo\n", "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd", "rev": 11}, {"line": "foofoo\n", "node": null, "rev": null}],
536 536 "path": "foo"
537 537 }
538 538 ]
539 539
540 540 annotate added file
541 541
542 542 $ echo bar > bar
543 543 $ hg add bar
544 544 $ hg annotate -ncr "wdir()" bar
545 545 20 b6bedd5477e7+: bar
546 546
547 547 annotate renamed file
548 548
549 549 $ hg rename foo renamefoo2
550 550 $ hg annotate -ncr "wdir()" renamefoo2
551 551 11 472b18db256d : foo
552 552 20 b6bedd5477e7+: foofoo
553 553
554 554 annotate missing file
555 555
556 556 $ rm baz
557 557
558 558 $ hg annotate -ncr "wdir()" baz
559 559 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
560 560 abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
561 561 [255]
562 562
563 563 annotate removed file
564 564
565 565 $ hg rm baz
566 566
567 567 $ hg annotate -ncr "wdir()" baz
568 568 abort: $TESTTMP\repo\baz: $ENOENT$ (windows !)
569 569 abort: $ENOENT$: $TESTTMP/repo/baz (no-windows !)
570 570 [255]
571 571
572 572 $ hg revert --all --no-backup --quiet
573 573 $ hg id -n
574 574 20
575 575
576 576 Test followlines() revset; we usually check both followlines(pat, range) and
577 577 followlines(pat, range, descend=True) to make sure both give the same result
578 578 when they should.
579 579
580 580 $ echo a >> foo
581 581 $ hg ci -m 'foo: add a'
582 582 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5)'
583 583 16: baz:0
584 584 19: baz:3
585 585 20: baz:4
586 586 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=20)'
587 587 16: baz:0
588 588 19: baz:3
589 589 20: baz:4
590 590 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19)'
591 591 16: baz:0
592 592 19: baz:3
593 593 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=True)'
594 594 19: baz:3
595 595 20: baz:4
596 596 $ printf "0\n0\n" | cat - baz > baz1
597 597 $ mv baz1 baz
598 598 $ hg ci -m 'added two lines with 0'
599 599 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
600 600 16: baz:0
601 601 19: baz:3
602 602 20: baz:4
603 603 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, descend=true, startrev=19)'
604 604 19: baz:3
605 605 20: baz:4
606 606 $ echo 6 >> baz
607 607 $ hg ci -m 'added line 8'
608 608 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
609 609 16: baz:0
610 610 19: baz:3
611 611 20: baz:4
612 612 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=1)'
613 613 19: baz:3
614 614 20: baz:4
615 615 $ sed 's/3/3+/' baz > baz.new
616 616 $ mv baz.new baz
617 617 $ hg ci -m 'baz:3->3+'
618 618 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, descend=0)'
619 619 16: baz:0
620 620 19: baz:3
621 621 20: baz:4
622 622 24: baz:3->3+
623 623 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=17, descend=True)'
624 624 19: baz:3
625 625 20: baz:4
626 626 24: baz:3->3+
627 627 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1:2, descend=false)'
628 628 22: added two lines with 0
629 629
630 630 file patterns are okay
631 631 $ hg log -T '{rev}: {desc}\n' -r 'followlines("path:baz", 1:2)'
632 632 22: added two lines with 0
633 633
634 634 renames are followed
635 635 $ hg mv baz qux
636 636 $ sed 's/4/4+/' qux > qux.new
637 637 $ mv qux.new qux
638 638 $ hg ci -m 'qux:4->4+'
639 639 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
640 640 16: baz:0
641 641 19: baz:3
642 642 20: baz:4
643 643 24: baz:3->3+
644 644 25: qux:4->4+
645 645
646 646 but are missed when following children
647 647 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=22, descend=True)'
648 648 24: baz:3->3+
649 649
650 650 merge
651 651 $ hg up 24 --quiet
652 652 $ echo 7 >> baz
653 653 $ hg ci -m 'one more line, out of line range'
654 654 created new head
655 655 $ sed 's/3+/3-/' baz > baz.new
656 656 $ mv baz.new baz
657 657 $ hg ci -m 'baz:3+->3-'
658 658 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
659 659 16: baz:0
660 660 19: baz:3
661 661 20: baz:4
662 662 24: baz:3->3+
663 663 27: baz:3+->3-
664 664 $ hg merge 25
665 665 merging baz and qux to qux
666 666 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
667 667 (branch merge, don't forget to commit)
668 668 $ hg ci -m merge
669 669 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
670 670 16: baz:0
671 671 19: baz:3
672 672 20: baz:4
673 673 24: baz:3->3+
674 674 25: qux:4->4+
675 675 27: baz:3+->3-
676 676 28: merge
677 677 $ hg up 25 --quiet
678 678 $ hg merge 27
679 679 merging qux and baz to qux
680 680 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
681 681 (branch merge, don't forget to commit)
682 682 $ hg ci -m 'merge from other side'
683 683 created new head
684 684 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
685 685 16: baz:0
686 686 19: baz:3
687 687 20: baz:4
688 688 24: baz:3->3+
689 689 25: qux:4->4+
690 690 27: baz:3+->3-
691 691 29: merge from other side
692 692 $ hg up 24 --quiet
693 693
694 694 we are missing the branch with rename when following children
695 695 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=26, descend=True)'
696 696 27: baz:3+->3-
697 697
698 698 we follow all branches in descending direction
699 699 $ hg up 23 --quiet
700 700 $ sed 's/3/+3/' baz > baz.new
701 701 $ mv baz.new baz
702 702 $ hg ci -m 'baz:3->+3'
703 703 created new head
704 704 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 2:5, startrev=16, descend=True)' --graph
705 705 @ 30: baz:3->+3
706 706 :
707 707 : o 27: baz:3+->3-
708 708 : :
709 709 : o 24: baz:3->3+
710 710 :/
711 711 o 20: baz:4
712 712 |\
713 713 | o 19: baz:3
714 714 |/
715 715 o 18: baz:2
716 716 :
717 717 o 16: baz:0
718 718 |
719 719 ~
720 720
721 721 Issue5595: on a merge changeset with different line ranges depending on
 722 722 parent, be conservative and use the surrounding interval to avoid losing
723 723 track of possible further descendants in specified range.
724 724
725 725 $ hg up 23 --quiet
726 726 $ hg cat baz -r 24
727 727 0
728 728 0
729 729 1 baz:1
730 730 2 baz:2
731 731 3+ baz:3
732 732 4 baz:4
733 733 5
734 734 6
735 735 $ cat > baz << EOF
736 736 > 0
737 737 > 0
738 738 > a
739 739 > b
740 740 > 3+ baz:3
741 741 > 4 baz:4
742 742 > y
743 743 > z
744 744 > EOF
745 745 $ hg ci -m 'baz: mostly rewrite with some content from 24'
746 746 created new head
747 747 $ hg merge --tool :merge-other 24
748 748 merging baz
749 749 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
750 750 (branch merge, don't forget to commit)
751 751 $ hg ci -m 'merge forgetting about baz rewrite'
752 752 $ cat > baz << EOF
753 753 > 0
754 754 > 0
755 755 > 1 baz:1
756 756 > 2+ baz:2
757 757 > 3+ baz:3
758 758 > 4 baz:4
759 759 > 5
760 760 > 6
761 761 > EOF
762 762 $ hg ci -m 'baz: narrow change (2->2+)'
763 763 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:4, startrev=20, descend=True)' --graph
764 764 @ 33: baz: narrow change (2->2+)
765 765 |
766 766 o 32: merge forgetting about baz rewrite
767 767 |\
768 768 | o 31: baz: mostly rewrite with some content from 24
769 769 | :
770 770 | : o 30: baz:3->+3
771 771 | :/
772 772 +---o 27: baz:3+->3-
773 773 | :
774 774 o : 24: baz:3->3+
775 775 :/
776 776 o 20: baz:4
777 777 |\
778 778 ~ ~
779 779
780 780 check error cases
781 781 $ hg up 24 --quiet
782 782 $ hg log -r 'followlines()'
783 783 hg: parse error: followlines takes at least 1 positional arguments
784 784 [255]
785 785 $ hg log -r 'followlines(baz)'
786 786 hg: parse error: followlines requires a line range
787 787 [255]
788 788 $ hg log -r 'followlines(baz, 1)'
789 789 hg: parse error: followlines expects a line range
790 790 [255]
791 791 $ hg log -r 'followlines(baz, 1:2, startrev=desc("b"))'
792 792 hg: parse error: followlines expects exactly one revision
793 793 [255]
794 794 $ hg log -r 'followlines("glob:*", 1:2)'
795 795 hg: parse error: followlines expects exactly one file
796 796 [255]
797 797 $ hg log -r 'followlines(baz, 1:)'
798 798 hg: parse error: line range bounds must be integers
799 799 [255]
800 800 $ hg log -r 'followlines(baz, :1)'
801 801 hg: parse error: line range bounds must be integers
802 802 [255]
803 803 $ hg log -r 'followlines(baz, x:4)'
804 804 hg: parse error: line range bounds must be integers
805 805 [255]
806 806 $ hg log -r 'followlines(baz, 5:4)'
807 807 hg: parse error: line range must be positive
808 808 [255]
809 809 $ hg log -r 'followlines(baz, 0:4)'
810 810 hg: parse error: fromline must be strictly positive
811 811 [255]
812 812 $ hg log -r 'followlines(baz, 2:40)'
813 813 abort: line range exceeds file size
814 814 [255]
815 815 $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=[1])'
816 816 hg: parse error at 43: not a prefix: [
817 817 [255]
818 818 $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=a)'
819 819 hg: parse error: descend argument must be a boolean
820 820 [255]
821 821
822 822 Test empty annotate output
823 823
824 824 $ printf '\0' > binary
825 825 $ touch empty
826 826 $ hg ci -qAm 'add binary and empty files'
827 827
828 828 $ hg annotate binary empty
829 829 binary: binary file
830 830
831 831 $ hg annotate -Tjson binary empty
832 832 [
833 833 {
834 834 "abspath": "binary",
835 835 "path": "binary"
836 836 },
837 837 {
838 838 "abspath": "empty",
839 839 "lines": [],
840 840 "path": "empty"
841 841 }
842 842 ]
843 843
844 844 Test annotate with whitespace options
845 845
846 846 $ cd ..
847 847 $ hg init repo-ws
848 848 $ cd repo-ws
849 849 $ cat > a <<EOF
850 850 > aa
851 851 >
852 852 > b b
853 853 > EOF
854 854 $ hg ci -Am "adda"
855 855 adding a
856 856 $ sed 's/EOL$//g' > a <<EOF
857 857 > a a
858 858 >
859 859 > EOL
860 860 > b b
861 861 > EOF
862 862 $ hg ci -m "changea"
863 863
864 864 Annotate with no option
865 865
866 866 $ hg annotate a
867 867 1: a a
868 868 0:
869 869 1:
870 870 1: b b
871 871
872 872 Annotate with --ignore-space-change
873 873
874 874 $ hg annotate --ignore-space-change a
875 875 1: a a
876 876 1:
877 877 0:
878 878 0: b b
879 879
880 880 Annotate with --ignore-all-space
881 881
882 882 $ hg annotate --ignore-all-space a
883 883 0: a a
884 884 0:
885 885 1:
886 886 0: b b
887 887
888 888 Annotate with --ignore-blank-lines (similar to no options case)
889 889
890 890 $ hg annotate --ignore-blank-lines a
891 891 1: a a
892 892 0:
893 893 1:
894 894 1: b b
895 895
896 896 $ cd ..
897 897
898 Annotate with orphaned CR (issue5798)
899 -------------------------------------
900
901 $ hg init repo-cr
902 $ cd repo-cr
903
904 $ substcr() {
905 > sed 's/\r/[CR]/g'
906 > }
907
908 >>> with open('a', 'wb') as f:
909 ... f.write(b'0a\r0b\r\n0c\r0d\r\n0e\n0f\n0g')
910 $ hg ci -qAm0
911 >>> with open('a', 'wb') as f:
912 ... f.write(b'0a\r0b\r\n1c\r1d\r\n0e\n1f\n0g')
913 $ hg ci -m1
914
915 $ hg annotate -r0 a | substcr
916 0: 0a[CR]0b[CR]
917 0: 0c[CR]0d[CR]
918 0: 0e
919 0: 0f
920 0: 0g
921 $ hg annotate -r1 a | substcr
922 0: 0a[CR]0b[CR]
923 1: 1c[CR]1d[CR]
924 0: 0e
925 1: 1f
926 0: 0g
927
928 $ cd ..
929
898 930 Annotate with linkrev pointing to another branch
899 931 ------------------------------------------------
900 932
901 933 create history with a filerev whose linkrev points to another branch
902 934
903 935 $ hg init branchedlinkrev
904 936 $ cd branchedlinkrev
905 937 $ echo A > a
906 938 $ hg commit -Am 'contentA'
907 939 adding a
908 940 $ echo B >> a
909 941 $ hg commit -m 'contentB'
910 942 $ hg up --rev 'desc(contentA)'
911 943 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
912 944 $ echo unrelated > unrelated
913 945 $ hg commit -Am 'unrelated'
914 946 adding unrelated
915 947 created new head
916 948 $ hg graft -r 'desc(contentB)'
917 949 grafting 1:fd27c222e3e6 "contentB"
918 950 $ echo C >> a
919 951 $ hg commit -m 'contentC'
920 952 $ echo W >> a
921 953 $ hg log -G
922 954 @ changeset: 4:072f1e8df249
923 955 | tag: tip
924 956 | user: test
925 957 | date: Thu Jan 01 00:00:00 1970 +0000
926 958 | summary: contentC
927 959 |
928 960 o changeset: 3:ff38df03cc4b
929 961 | user: test
930 962 | date: Thu Jan 01 00:00:00 1970 +0000
931 963 | summary: contentB
932 964 |
933 965 o changeset: 2:62aaf3f6fc06
934 966 | parent: 0:f0932f74827e
935 967 | user: test
936 968 | date: Thu Jan 01 00:00:00 1970 +0000
937 969 | summary: unrelated
938 970 |
939 971 | o changeset: 1:fd27c222e3e6
940 972 |/ user: test
941 973 | date: Thu Jan 01 00:00:00 1970 +0000
942 974 | summary: contentB
943 975 |
944 976 o changeset: 0:f0932f74827e
945 977 user: test
946 978 date: Thu Jan 01 00:00:00 1970 +0000
947 979 summary: contentA
948 980
949 981
950 982 Annotate should list ancestor of starting revision only
951 983
952 984 $ hg annotate a
953 985 0: A
954 986 3: B
955 987 4: C
956 988
957 989 $ hg annotate a -r 'wdir()'
958 990 0 : A
959 991 3 : B
960 992 4 : C
961 993 4+: W
962 994
963 995 Even when the starting revision is the linkrev-shadowed one:
964 996
965 997 $ hg annotate a -r 3
966 998 0: A
967 999 3: B
968 1000
969 1001 $ cd ..
970 1002
971 1003 Issue5360: Deleted chunk in p1 of a merge changeset
972 1004
973 1005 $ hg init repo-5360
974 1006 $ cd repo-5360
975 1007 $ echo 1 > a
976 1008 $ hg commit -A a -m 1
977 1009 $ echo 2 >> a
978 1010 $ hg commit -m 2
979 1011 $ echo a > a
980 1012 $ hg commit -m a
981 1013 $ hg update '.^' -q
982 1014 $ echo 3 >> a
983 1015 $ hg commit -m 3 -q
984 1016 $ hg merge 2 -q
985 1017 $ cat > a << EOF
986 1018 > b
987 1019 > 1
988 1020 > 2
989 1021 > 3
990 1022 > a
991 1023 > EOF
992 1024 $ hg resolve --mark -q
993 1025 $ hg commit -m m
994 1026 $ hg annotate a
995 1027 4: b
996 1028 0: 1
997 1029 1: 2
998 1030 3: 3
999 1031 2: a
1000 1032
1001 1033 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now