##// END OF EJS Templates
annotate: track whether a particular annotation was the result of a skip...
Siddharth Agarwal -
r34434:2f5a135b default
parent child Browse files
Show More
@@ -1,2566 +1,2568
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 wdirrev,
27 27 )
28 28 from .thirdparty import (
29 29 attr,
30 30 )
31 31 from . import (
32 32 encoding,
33 33 error,
34 34 fileset,
35 35 match as matchmod,
36 36 mdiff,
37 37 obsolete as obsmod,
38 38 patch,
39 39 pathutil,
40 40 phases,
41 41 pycompat,
42 42 repoview,
43 43 revlog,
44 44 scmutil,
45 45 sparse,
46 46 subrepo,
47 47 util,
48 48 )
49 49
50 50 propertycache = util.propertycache
51 51
52 52 nonascii = re.compile(r'[^\x21-\x7f]').search
53 53
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # passing an existing context through returns it unchanged, so
        # basectx(repo, otherctx) is a cheap identity conversion
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # contexts compare equal when they are the same type and revision;
        # AttributeError covers comparison against non-context objects
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                # only present when listclean is true: file is unchanged
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    # trivial delegating accessors
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        msg = ("'context.unstable' is deprecated, "
               "use 'context.orphan'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.orphan()

    def orphan(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def bumped(self):
        msg = ("'context.bumped' is deprecated, "
               "use 'context.phasedivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.phasedivergent()

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def divergent(self):
        msg = ("'context.divergent' is deprecated, "
               "use 'context.contentdivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.contentdivergent()

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def troubled(self):
        msg = ("'context.troubled' is deprecated, "
               "use 'context.isunstable'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.isunstable()

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def troubles(self):
        """Keep the old version around in order to avoid breaking extensions
        about different return values.
        """
        msg = ("'context.troubles' is deprecated, "
               "use 'context.instabilities'")
        self._repo.ui.deprecwarn(msg, '4.4')

        troubles = []
        if self.orphan():
            troubles.append('orphan')
        if self.phasedivergent():
            troubles.append('bumped')
        if self.contentdivergent():
            troubles.append('divergent')
        return troubles

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # _parents has one entry when the second parent is null; synthesize
        # a null changectx in that case so callers always get a context back
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # prefer data already cached on this instance (full manifest, then
        # manifest delta) before going to the manifest log
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        #
        # NOTE: 'reversed' shadows the builtin within this method.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
428 428
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    filtername = repo.filtername
    if not filtername.startswith('visible'):
        # a non-default filter: report which subset is missing the revision
        msg = _("filtered revision '%s' (not in '%s' subset)")
        return error.FilteredRepoLookupError(msg % (changeid, filtername))
    # the common case: the revision is hidden from the visible view
    msg = _("hidden revision '%s'") % changeid
    hint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
441 441
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        # The resolution order below: int rev, 'null', 'tip', '.'/dirstate
        # p1, binary node, decimal rev string, hex node, names (bookmarks,
        # tags, branches), and finally unambiguous node prefix.
        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # binary nodeid
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    # negative revs count back from tip
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # full hex nodeid
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # make the error message readable for binary nodeids
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # omit the second parent when it is null
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    # trivial accessors delegating to the parsed changelog entry
    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
702 702
703 703 class basefilectx(object):
704 704 """A filecontext object represents the common logic for its children:
705 705 filectx: read-only access to a filerevision that is already present
706 706 in the repo,
707 707 workingfilectx: a filecontext that represents files from the working
708 708 directory,
709 709 memfilectx: a filecontext that represents files in-memory,
710 710 overlayfilectx: duplicate another filecontext with some fields overridden.
711 711 """
    @propertycache
    def _filelog(self):
        # filelog (file revision history) backing this path
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Resolve the changeset revision this file revision belongs to,
        # preferring explicitly provided context over the stored linkrev.
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)
728 728
729 729 @propertycache
730 730 def _filenode(self):
731 731 if r'_fileid' in self.__dict__:
732 732 return self._filelog.lookup(self._fileid)
733 733 else:
734 734 return self._changectx.filenode(self._path)
735 735
    @propertycache
    def _filerev(self):
        # filelog revision number for this file node
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path
743 743
    def __nonzero__(self):
        # truthiness == "the file exists in this context"
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # changeset lookup failed; still show the path
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)
761 761
762 762 def __repr__(self):
763 763 return "<%s %s>" % (type(self).__name__, str(self))
764 764
    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # not fully initialized; fall back to identity
            return id(self)

    def __eq__(self, other):
        # equal when same type, same path and same file node;
        # AttributeError covers comparison against non-filectx objects
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
780 780
    # trivial accessors delegating to the filelog or the owning changectx
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # raw stored linkrev; see introrev() for an ancestry-checked variant
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path
829 829
    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            # data unavailable: treat as non-binary
            return False
    def isexec(self):
        # 'x' flag: executable bit
        return 'x' in self.flags()
    def islink(self):
        # 'l' flag: symbolic link
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
846 846
    # subclasses with custom comparison logic set this to delegate cmp()
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        # Only read and compare file data when the sizes could plausibly
        # match; otherwise the contents must differ.
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True
865 865
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
911 911
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        # without an associated changeset the raw linkrev is the best we have
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)
927 927
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
943 943
944 944 def parents(self):
945 945 _path = self._path
946 946 fl = self._filelog
947 947 parents = self._filelog.parents(self._filenode)
948 948 pl = [(_path, node, fl) for node in parents if node != nullid]
949 949
950 950 r = fl.renamed(self._filenode)
951 951 if r:
952 952 # - In the simple rename case, both parent are nullid, pl is empty.
953 953 # - In case of merge, only one of the parent is null id and should
954 954 # be replaced with the rename information. This parent is -always-
955 955 # the first one.
956 956 #
957 957 # As null id have always been filtered out in the previous list
958 958 # comprehension, inserting to 0 will always result in "replacing
959 959 # first nullid parent with rename information.
960 960 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
961 961
962 962 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
963 963
    def p1(self):
        """Return the first parent filectx."""
        return self.parents()[0]
966 966
967 967 def p2(self):
968 968 p = self.parents()
969 969 if len(p) == 2:
970 970 return p[1]
971 971 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
972 972
    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns pairs of (annotateline, line) for each line in the file,
        where annotateline.fctx is the filectx of the node where that line
        was last changed; if the linenumber parameter is true,
        annotateline.lineno is the line number at the line's first
        appearance in the managed file, otherwise it keeps its default
        value of False. annotateline.skip is set (by _annotatepair) for
        lines whose attribution resulted from a skipped revision
        (see skiprevs).
        '''

        def lines(text):
            # number of lines in text; a trailing fragment without a final
            # newline still counts as one line
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([annotateline(fctx=rev, lineno=i)
                         for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([annotateline(fctx=rev)] * lines(text), text)

        # cache filelog lookups, one per distinct path encountered
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        # pcache: filectx -> list of parent filectxs
        # needed: reference count, so per-revision annotate data can be
        # freed as soon as every child has consumed it
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            # only process f once all of its parents have been annotated
            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                for p in pl:
                    # drop parent annotate data once no other child needs it
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))
1086 1086
1087 1087 def ancestors(self, followfirst=False):
1088 1088 visit = {}
1089 1089 c = self
1090 1090 if followfirst:
1091 1091 cut = 1
1092 1092 else:
1093 1093 cut = None
1094 1094
1095 1095 while True:
1096 1096 for parent in c.parents()[:cut]:
1097 1097 visit[(parent.linkrev(), parent.filenode())] = parent
1098 1098 if not visit:
1099 1099 break
1100 1100 c = visit.pop(max(visit))
1101 1101 yield c
1102 1102
1103 1103 def decodeddata(self):
1104 1104 """Returns `data()` after running repository decoding filters.
1105 1105
1106 1106 This is often equivalent to how the data would be expressed on disk.
1107 1107 """
1108 1108 return self._repo.wwritedata(self.path(), self.data())
1109 1109
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """Annotation data for a single line, as produced by annotate()."""
    # filectx of the revision this line is attributed to
    fctx = attr.ib()
    # line number at the line's first appearance in the managed file,
    # or False when line numbers were not requested
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
1114 1116
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    Each of `parents` and `child` is an (annotateline list, text) pair as
    built by annotate()'s decorate(); the annotateline list is mutated in
    place and `child` is returned.

    See test-annotate.py for unit tests.
    '''
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        # only replace lines still blamed on the child
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak], skip=True)
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
1175 1177
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be supplied
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """Return the raw (undecoded) revlog payload for this file revision."""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content at this revision.

        Raises Abort on a censored node unless censor.policy is 'ignore',
        in which case an empty string is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        """Return the size of this file revision as reported by the filelog."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # same file revision present in a parent: not a copy here
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        """Return filectxs for the filelog children of this file revision."""
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1281 1283
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # no revision number or node exists until this context is committed
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __bytes__(self):
        # a committable context displays as its first parent plus '+'
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # lazily computed when no explicit 'changes' was given to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date allows tests to pin the commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    # plain accessors over the commit metadata and (lazily computed) status
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer the cached manifest's flags when one has been built
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # yield direct parents first, then all changelog ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1479 1481
1480 1482 class workingctx(committablectx):
1481 1483 """A workingctx object makes access to data related to
1482 1484 the current working directory convenient.
1483 1485 date - any valid date string or (unixtime, offset), or None.
1484 1486 user - username string, or None.
1485 1487 extra - a dictionary of extra values, or None.
1486 1488 changes - a list of file lists as returned by localrepo.status()
1487 1489 or None to use the repository status.
1488 1490 """
1489 1491 def __init__(self, repo, text="", user=None, date=None, extra=None,
1490 1492 changes=None):
1491 1493 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1492 1494
1493 1495 def __iter__(self):
1494 1496 d = self._repo.dirstate
1495 1497 for f in d:
1496 1498 if d[f] != 'r':
1497 1499 yield f
1498 1500
1499 1501 def __contains__(self, key):
1500 1502 return self._repo.dirstate[key] not in "?r"
1501 1503
1502 1504 def hex(self):
1503 1505 return hex(wdirid)
1504 1506
1505 1507 @propertycache
1506 1508 def _parents(self):
1507 1509 p = self._repo.dirstate.parents()
1508 1510 if p[1] == nullid:
1509 1511 p = p[:-1]
1510 1512 return [changectx(self._repo, x) for x in p]
1511 1513
1512 1514 def filectx(self, path, filelog=None):
1513 1515 """get a file context from the working directory"""
1514 1516 return workingfilectx(self._repo, path, workingctx=self,
1515 1517 filelog=filelog)
1516 1518
1517 1519 def dirty(self, missing=False, merge=True, branch=True):
1518 1520 "check whether a working directory is modified"
1519 1521 # check subrepos first
1520 1522 for s in sorted(self.substate):
1521 1523 if self.sub(s).dirty(missing=missing):
1522 1524 return True
1523 1525 # check current working dir
1524 1526 return ((merge and self.p2()) or
1525 1527 (branch and self.branch() != self.p1().branch()) or
1526 1528 self.modified() or self.added() or self.removed() or
1527 1529 (missing and self.deleted()))
1528 1530
1529 1531 def add(self, list, prefix=""):
1530 1532 with self._repo.wlock():
1531 1533 ui, ds = self._repo.ui, self._repo.dirstate
1532 1534 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1533 1535 rejected = []
1534 1536 lstat = self._repo.wvfs.lstat
1535 1537 for f in list:
1536 1538 # ds.pathto() returns an absolute file when this is invoked from
1537 1539 # the keyword extension. That gets flagged as non-portable on
1538 1540 # Windows, since it contains the drive letter and colon.
1539 1541 scmutil.checkportable(ui, os.path.join(prefix, f))
1540 1542 try:
1541 1543 st = lstat(f)
1542 1544 except OSError:
1543 1545 ui.warn(_("%s does not exist!\n") % uipath(f))
1544 1546 rejected.append(f)
1545 1547 continue
1546 1548 if st.st_size > 10000000:
1547 1549 ui.warn(_("%s: up to %d MB of RAM may be required "
1548 1550 "to manage this file\n"
1549 1551 "(use 'hg revert %s' to cancel the "
1550 1552 "pending addition)\n")
1551 1553 % (f, 3 * st.st_size // 1000000, uipath(f)))
1552 1554 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1553 1555 ui.warn(_("%s not added: only files and symlinks "
1554 1556 "supported currently\n") % uipath(f))
1555 1557 rejected.append(f)
1556 1558 elif ds[f] in 'amn':
1557 1559 ui.warn(_("%s already tracked!\n") % uipath(f))
1558 1560 elif ds[f] == 'r':
1559 1561 ds.normallookup(f)
1560 1562 else:
1561 1563 ds.add(f)
1562 1564 return rejected
1563 1565
1564 1566 def forget(self, files, prefix=""):
1565 1567 with self._repo.wlock():
1566 1568 ds = self._repo.dirstate
1567 1569 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1568 1570 rejected = []
1569 1571 for f in files:
1570 1572 if f not in self._repo.dirstate:
1571 1573 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1572 1574 rejected.append(f)
1573 1575 elif self._repo.dirstate[f] != 'a':
1574 1576 self._repo.dirstate.remove(f)
1575 1577 else:
1576 1578 self._repo.dirstate.drop(f)
1577 1579 return rejected
1578 1580
1579 1581 def undelete(self, list):
1580 1582 pctxs = self.parents()
1581 1583 with self._repo.wlock():
1582 1584 ds = self._repo.dirstate
1583 1585 for f in list:
1584 1586 if self._repo.dirstate[f] != 'r':
1585 1587 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1586 1588 else:
1587 1589 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1588 1590 t = fctx.data()
1589 1591 self._repo.wwrite(f, t, fctx.flags())
1590 1592 self._repo.dirstate.normal(f)
1591 1593
1592 1594 def copy(self, source, dest):
1593 1595 try:
1594 1596 st = self._repo.wvfs.lstat(dest)
1595 1597 except OSError as err:
1596 1598 if err.errno != errno.ENOENT:
1597 1599 raise
1598 1600 self._repo.ui.warn(_("%s does not exist!\n")
1599 1601 % self._repo.dirstate.pathto(dest))
1600 1602 return
1601 1603 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1602 1604 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1603 1605 "symbolic link\n")
1604 1606 % self._repo.dirstate.pathto(dest))
1605 1607 else:
1606 1608 with self._repo.wlock():
1607 1609 if self._repo.dirstate[dest] in '?':
1608 1610 self._repo.dirstate.add(dest)
1609 1611 elif self._repo.dirstate[dest] in 'r':
1610 1612 self._repo.dirstate.normallookup(dest)
1611 1613 self._repo.dirstate.copy(source, dest)
1612 1614
1613 1615 def match(self, pats=None, include=None, exclude=None, default='glob',
1614 1616 listsubrepos=False, badfn=None):
1615 1617 r = self._repo
1616 1618
1617 1619 # Only a case insensitive filesystem needs magic to translate user input
1618 1620 # to actual case in the filesystem.
1619 1621 icasefs = not util.fscasesensitive(r.root)
1620 1622 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1621 1623 default, auditor=r.auditor, ctx=self,
1622 1624 listsubrepos=listsubrepos, badfn=badfn,
1623 1625 icasefs=icasefs)
1624 1626
1625 1627 def flushall(self):
1626 1628 pass # For overlayworkingfilectx compatibility.
1627 1629
1628 1630 def _filtersuspectsymlink(self, files):
1629 1631 if not files or self._repo.dirstate._checklink:
1630 1632 return files
1631 1633
1632 1634 # Symlink placeholders may get non-symlink-like contents
1633 1635 # via user error or dereferencing by NFS or Samba servers,
1634 1636 # so we filter out any placeholders that don't look like a
1635 1637 # symlink
1636 1638 sane = []
1637 1639 for f in files:
1638 1640 if self.flags(f) == 'l':
1639 1641 d = self[f].data()
1640 1642 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1641 1643 self._repo.ui.debug('ignoring suspect symlink placeholder'
1642 1644 ' "%s"\n' % f)
1643 1645 continue
1644 1646 sane.append(f)
1645 1647 return sane
1646 1648
1647 1649 def _checklookup(self, files):
1648 1650 # check for any possibly clean files
1649 1651 if not files:
1650 1652 return [], [], []
1651 1653
1652 1654 modified = []
1653 1655 deleted = []
1654 1656 fixup = []
1655 1657 pctx = self._parents[0]
1656 1658 # do a full compare of any files that might have changed
1657 1659 for f in sorted(files):
1658 1660 try:
1659 1661 # This will return True for a file that got replaced by a
1660 1662 # directory in the interim, but fixing that is pretty hard.
1661 1663 if (f not in pctx or self.flags(f) != pctx.flags(f)
1662 1664 or pctx[f].cmp(self[f])):
1663 1665 modified.append(f)
1664 1666 else:
1665 1667 fixup.append(f)
1666 1668 except (IOError, OSError):
1667 1669 # A file become inaccessible in between? Mark it as deleted,
1668 1670 # matching dirstate behavior (issue5584).
1669 1671 # The dirstate has more complex behavior around whether a
1670 1672 # missing file matches a directory, etc, but we don't need to
1671 1673 # bother with that: if f has made it to this point, we're sure
1672 1674 # it's in the dirstate.
1673 1675 deleted.append(f)
1674 1676
1675 1677 return modified, deleted, fixup
1676 1678
1677 1679 def _poststatusfixup(self, status, fixup):
1678 1680 """update dirstate for files that are actually clean"""
1679 1681 poststatus = self._repo.postdsstatus()
1680 1682 if fixup or poststatus:
1681 1683 try:
1682 1684 oldid = self._repo.dirstate.identity()
1683 1685
1684 1686 # updating the dirstate is optional
1685 1687 # so we don't wait on the lock
1686 1688 # wlock can invalidate the dirstate, so cache normal _after_
1687 1689 # taking the lock
1688 1690 with self._repo.wlock(False):
1689 1691 if self._repo.dirstate.identity() == oldid:
1690 1692 if fixup:
1691 1693 normal = self._repo.dirstate.normal
1692 1694 for f in fixup:
1693 1695 normal(f)
1694 1696 # write changes out explicitly, because nesting
1695 1697 # wlock at runtime may prevent 'wlock.release()'
1696 1698 # after this block from doing so for subsequent
1697 1699 # changing files
1698 1700 tr = self._repo.currenttransaction()
1699 1701 self._repo.dirstate.write(tr)
1700 1702
1701 1703 if poststatus:
1702 1704 for ps in poststatus:
1703 1705 ps(self, status)
1704 1706 else:
1705 1707 # in this case, writing changes out breaks
1706 1708 # consistency, because .hg/dirstate was
1707 1709 # already changed simultaneously after last
1708 1710 # caching (see also issue5584 for detail)
1709 1711 self._repo.ui.debug('skip updating dirstate: '
1710 1712 'identity mismatch\n')
1711 1713 except error.LockError:
1712 1714 pass
1713 1715 finally:
1714 1716 # Even if the wlock couldn't be grabbed, clear out the list.
1715 1717 self._repo.clearpostdsstatus()
1716 1718
1717 1719 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1718 1720 '''Gets the status from the dirstate -- internal use only.'''
1719 1721 subrepos = []
1720 1722 if '.hgsub' in self:
1721 1723 subrepos = sorted(self.substate)
1722 1724 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1723 1725 clean=clean, unknown=unknown)
1724 1726
1725 1727 # check for any possibly clean files
1726 1728 fixup = []
1727 1729 if cmp:
1728 1730 modified2, deleted2, fixup = self._checklookup(cmp)
1729 1731 s.modified.extend(modified2)
1730 1732 s.deleted.extend(deleted2)
1731 1733
1732 1734 if fixup and clean:
1733 1735 s.clean.extend(fixup)
1734 1736
1735 1737 self._poststatusfixup(s, fixup)
1736 1738
1737 1739 if match.always():
1738 1740 # cache for performance
1739 1741 if s.unknown or s.ignored or s.clean:
1740 1742 # "_status" is cached with list*=False in the normal route
1741 1743 self._status = scmutil.status(s.modified, s.added, s.removed,
1742 1744 s.deleted, [], [], [])
1743 1745 else:
1744 1746 self._status = s
1745 1747
1746 1748 return s
1747 1749
1748 1750 @propertycache
1749 1751 def _manifest(self):
1750 1752 """generate a manifest corresponding to the values in self._status
1751 1753
1752 1754 This reuse the file nodeid from parent, but we use special node
1753 1755 identifiers for added and modified files. This is used by manifests
1754 1756 merge to see that files are different and by update logic to avoid
1755 1757 deleting newly added files.
1756 1758 """
1757 1759 return self._buildstatusmanifest(self._status)
1758 1760
1759 1761 def _buildstatusmanifest(self, status):
1760 1762 """Builds a manifest that includes the given status results."""
1761 1763 parents = self.parents()
1762 1764
1763 1765 man = parents[0].manifest().copy()
1764 1766
1765 1767 ff = self._flagfunc
1766 1768 for i, l in ((addednodeid, status.added),
1767 1769 (modifiednodeid, status.modified)):
1768 1770 for f in l:
1769 1771 man[f] = i
1770 1772 try:
1771 1773 man.setflag(f, ff(f))
1772 1774 except OSError:
1773 1775 pass
1774 1776
1775 1777 for f in status.deleted + status.removed:
1776 1778 if f in man:
1777 1779 del man[f]
1778 1780
1779 1781 return man
1780 1782
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # note: the incoming 's' is discarded; status is always recomputed
        # from the dirstate first
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # slow path: not comparing against the dirstate's parent, so
            # fall back to the generic manifest-based comparison
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1800 1802
1801 1803 def _matchstatus(self, other, match):
1802 1804 """override the match method with a filter for directory patterns
1803 1805
1804 1806 We use inheritance to customize the match.bad method only in cases of
1805 1807 workingctx since it belongs only to the working directory when
1806 1808 comparing against the parent changeset.
1807 1809
1808 1810 If we aren't comparing against the working directory's parent, then we
1809 1811 just use the default match object sent to us.
1810 1812 """
1811 1813 if other != self._repo['.']:
1812 1814 def bad(f, msg):
1813 1815 # 'f' may be a directory pattern from 'match.files()',
1814 1816 # so 'f not in ctx1' is not enough
1815 1817 if f not in other and not other.hasdir(f):
1816 1818 self._repo.ui.warn('%s: %s\n' %
1817 1819 (self._repo.dirstate.pathto(f), msg))
1818 1820 match.bad = bad
1819 1821 return match
1820 1822
    def markcommitted(self, node):
        """Perform post-commit bookkeeping once this context became *node*."""
        super(workingctx, self).markcommitted(node)

        # let the sparse machinery refresh its state after the commit
        sparse.aftercommit(self._repo, node)
1825 1827
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # not yet associated with a committed changeset
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid means "file absent in that parent"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # copy/rename: the sole first parent is the copy source;
            # its filelog is not known here, hence None
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents in which the file does not exist (node == nullid)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # an uncommitted file has no committed children
        return []
1872 1874
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # default to a fresh working directory context when none was given
        return workingctx(self._repo)

    def data(self):
        # raw content as read from the working directory
        return self._repo.wread(self._path)
    def renamed(self):
        """Return (source, source-node-in-p1) if the dirstate records this
        file as copied, else None."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """Return (mtime, tz); fall back to the changeset date if the file
        no longer exists on disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            # only ENOENT (missing file) is expected here; re-raise others
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # like exists(), but does not follow symlinks
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        if wvfs.isdir(self._path) and not wvfs.islink(self._path):
            # a real directory occupies the file's path; remove it (and any
            # newly-empty parents) so the file can be written
            wvfs.removedirs(self._path)

    def setflags(self, l, x):
        # l: symlink flag, x: executable flag
        self._repo.wvfs.setflags(self._path, l, x)
1939 1941
class overlayworkingctx(workingctx):
    """Wraps another mutable context with a write-back cache that can be
    flushed at a later time.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo, wrappedctx):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        # reads of non-dirty paths fall through to this context
        self._wrappedctx = wrappedctx
        self._clean()

    def data(self, path):
        """Return the cached content of ``path``, falling through to the
        wrapped context for clean paths or flag-only edits.

        Raises ProgrammingError if the path was deleted in the cache.
        """
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                # fixed: this ctx class has no self._path attribute; report
                # the path that was actually requested
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def flags(self, path):
        """Return the cached flags of ``path`` (or the wrapped context's)."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # fixed: use the requested path, not a nonexistent attribute
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def write(self, path, data, flags=''):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._markdirty(path, exists=True, data=data, date=util.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=util.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']
        return self._wrappedctx[path].exists()

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']
        return self._wrappedctx[path].lexists()

    def size(self, path):
        """Return the size of ``path``'s content in bytes."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # fixed: use the requested path, not a nonexistent attribute
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def flushall(self):
        """Write every cached change through to the wrapped context, in the
        order the writes were first recorded, then reset the cache."""
        for path in self._writeorder:
            entry = self._cache[path]
            if entry['exists']:
                self._wrappedctx[path].clearunknown()
                if entry['data'] is not None:
                    if entry['flags'] is None:
                        raise error.ProgrammingError('data set but not flags')
                    self._wrappedctx[path].write(
                        entry['data'],
                        entry['flags'])
                else:
                    # flag-only change
                    self._wrappedctx[path].setflags(
                        'l' in entry['flags'],
                        'x' in entry['flags'])
            else:
                # NOTE(review): 'path' here lands in the filectx's
                # 'ignoremissing' parameter (a truthy string) -- this looks
                # like it was meant to be remove(); confirm before changing,
                # since it currently suppresses missing-file errors
                self._wrappedctx[path].remove(path)
        self._clean()

    def isdirty(self, path):
        # a path is dirty iff it has a cache entry
        return path in self._cache

    def _clean(self):
        # reset the write-back cache and recorded write order
        self._cache = {}
        self._writeorder = []

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        if path not in self._cache:
            # remember first-write order for flushall()
            self._writeorder.append(path)

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2072 2074
class overlayworkingfilectx(workingfilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # the overlayworkingctx holding the write-back cache
        self._parent = parent
        self._path = path

    def ctx(self):
        return self._parent

    # all reads and writes below delegate to the parent overlay context,
    # keyed by this file's path

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        # Copies are currently tracked in the dirstate as before. Straight copy
        # from workingfilectx.
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._parent.size(self._path)

    def audit(self):
        # nothing to audit for in-memory files
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False):
        # backgroundclose is accepted for interface compatibility but has no
        # meaning for an in-memory write
        return self._parent.write(self._path, data, flags)

    def remove(self, ignoremissing=False):
        # ignoremissing is accepted for interface compatibility only
        return self._parent.remove(self._path)
2124 2126
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # note: super(workingctx, ...) deliberately skips
        # workingctx.__init__ and dispatches to the next class in the MRO,
        # passing the precomputed 'changes' through
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # everything tracked that is not part of this commit is "clean"
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2160 2162
def makecachingfilectxfn(func):
    """Wrap ``func`` so that its result is memoized per path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        try:
            return memo[path]
        except KeyError:
            memo[path] = func(repo, memctx, path)
            return memo[path]

    return getfilectx
2176 2178
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # renamed() yields (source, node), but memfilectx wants only the
        # source path; apparently only one parent is ever tracked
        # (why not only store that instead of a tuple?)
        renamed = fctx.renamed()
        source = renamed[0] if renamed else None
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=source, memctx=memctx)

    return getfilectx
2195 2197
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # the patch removed this file
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data,
                          islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    return getfilectx
2211 2213
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # normalize missing parents (None) to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        # accept a patch store or a plain context in place of a callable
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        # filelog is accepted for interface compatibility; the callback
        # decides how to materialize the file
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
            if len(p) > 1:
                p2node = p[1].filenode()
            # hash content with its filelog parents to get the new node id
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no filelog parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not in either parent manifest: a new file
                added.append(f)
            elif self[f]:
                # filectxfn produced content: a modification
                modified.append(f)
            else:
                # filectxfn returned None: a removal
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2334 2336
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def data(self):
        # the in-memory content supplied at construction time
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
2367 2369
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # no override supplied, so any ctx trivially "matches"
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        # lazily evaluated: either the override or originalfctx.data
        return self._datafunc()
2438 2440
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reuisng
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # pad with null contexts so we always have exactly two parents
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # fixed: compare the parent's node id to nullid; the previous
        # "p1 != nullid" compared a changectx object to a bytes node id,
        # which is never equal, so the guard never short-circuited
        if p1.node() != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2.node() != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # delegate file access to the original context whose manifest we reuse
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not in either parent manifest: a new file
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2536 2538
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path):
        # filesystem path of the file (absolute or cwd-relative)
        self._path = path

    def cmp(self, otherfilectx):
        """Return True if this file's content differs from otherfilectx's."""
        return self.data() != otherfilectx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no link/exec flags here
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        # read in binary mode so the content comes back unmangled
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags):
        """Write raw content to the file. Flags are not supported."""
        assert not flags
        # fixed: open in binary mode to mirror the binary read paths
        # ("rb" above, util.readfile); text mode would newline-translate
        # the data on Windows
        with open(self._path, "wb") as f:
            f.write(data)
@@ -1,102 +1,104
1 1 from __future__ import absolute_import
2 2 from __future__ import print_function
3 3
4 4 import unittest
5 5
6 6 from mercurial import (
7 7 mdiff,
8 8 )
9 9 from mercurial.context import (
10 10 annotateline,
11 11 _annotatepair,
12 12 )
13 13
14 14 class AnnotateTests(unittest.TestCase):
15 15 """Unit tests for annotate code."""
16 16
17 17 def testannotatepair(self):
18 18 self.maxDiff = None # camelcase-required
19 19
# Stand-in filectx identifiers and file contents for a small history:
# old -> p1/p2 -> child.
20 20 oldfctx = b'old'
21 21 p1fctx, p2fctx, childfctx = b'p1', b'p2', b'c'
22 22 olddata = b'a\nb\n'
23 23 p1data = b'a\nb\nc\n'
24 24 p2data = b'a\nc\nd\n'
25 25 childdata = b'a\nb2\nc\nc2\nd\n'
26 26 diffopts = mdiff.diffopts()
27 27
# Pair each line of ``text`` with an annotateline pointing at ``rev``,
# mirroring what the annotate machinery does for a full revision.
28 28 def decorate(text, rev):
29 29 return ([annotateline(fctx=rev, lineno=i)
30 30 for i in xrange(1, text.count(b'\n') + 1)],
31 31 text)
32 32
33 33 # Basic usage
34 34
35 35 oldann = decorate(olddata, oldfctx)
36 36 p1ann = decorate(p1data, p1fctx)
37 37 p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
38 38 self.assertEqual(p1ann[0], [
39 39 annotateline('old', 1),
40 40 annotateline('old', 2),
41 41 annotateline('p1', 3),
42 42 ])
43 43
44 44 p2ann = decorate(p2data, p2fctx)
45 45 p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
46 46 self.assertEqual(p2ann[0], [
47 47 annotateline('old', 1),
48 48 annotateline('p2', 2),
49 49 annotateline('p2', 3),
50 50 ])
51 51
52 52 # Test with multiple parents (note the difference caused by ordering)
53 53
54 54 childann = decorate(childdata, childfctx)
55 55 childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
56 56 diffopts)
57 57 self.assertEqual(childann[0], [
58 58 annotateline('old', 1),
59 59 annotateline('c', 2),
60 60 annotateline('p2', 2),
61 61 annotateline('c', 4),
62 62 annotateline('p2', 3),
63 63 ])
64 64
65 65 childann = decorate(childdata, childfctx)
66 66 childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
67 67 diffopts)
68 68 self.assertEqual(childann[0], [
69 69 annotateline('old', 1),
70 70 annotateline('c', 2),
71 71 annotateline('p1', 3),
72 72 annotateline('c', 4),
73 73 annotateline('p2', 3),
74 74 ])
75 75
76 76 # Test with skipchild (note the difference caused by ordering)
# With skipchild=True, lines the child would otherwise own are
# re-attributed to a parent; the added expectations below pass a third
# positional argument to annotateline, which (per this changeset's
# intent) flags that the annotation resulted from skipping the child —
# confirm against the annotateline definition in context.py.
77 77
78 78 childann = decorate(childdata, childfctx)
79 79 childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
80 80 diffopts)
81 81 self.assertEqual(childann[0], [
82 82 annotateline('old', 1),
83 annotateline('old', 2),
83 annotateline('old', 2, True),
84 # note that this line was carried over from earlier so it is *not*
85 # marked skipped
84 86 annotateline('p2', 2),
85 annotateline('p2', 2),
87 annotateline('p2', 2, True),
86 88 annotateline('p2', 3),
87 89 ])
88 90
89 91 childann = decorate(childdata, childfctx)
90 92 childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
91 93 diffopts)
92 94 self.assertEqual(childann[0], [
93 95 annotateline('old', 1),
94 annotateline('old', 2),
96 annotateline('old', 2, True),
95 97 annotateline('p1', 3),
96 annotateline('p1', 3),
98 annotateline('p1', 3, True),
97 99 annotateline('p2', 3),
98 100 ])
99 101
# When executed directly, run the tests through Mercurial's in-tree
# silenttestrunner (keeps output stable for the .t test harness).
100 102 if __name__ == '__main__':
101 103 import silenttestrunner
102 104 silenttestrunner.main(__name__)
General Comments 0
You need to be logged in to leave comments. Login now