##// END OF EJS Templates
context: fix troubled deprecation...
Boris Feld -
r33794:4abf34f4 default
parent child Browse files
Show More
@@ -1,2371 +1,2371 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 wdirrev,
27 27 )
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 mdiff,
34 34 obsolete as obsmod,
35 35 patch,
36 36 pathutil,
37 37 phases,
38 38 pycompat,
39 39 repoview,
40 40 revlog,
41 41 scmutil,
42 42 sparse,
43 43 subrepo,
44 44 util,
45 45 )
46 46
47 47 propertycache = util.propertycache
48 48
49 49 nonascii = re.compile(r'[^\x21-\x7f]').search
50 50
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # If we are handed an existing context, return it unchanged so that
        # repo[ctx] is cheap and identity-preserving.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different classes never compare equal, even at the
        # same revision.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # subrepo state for this revision, keyed by subrepo path
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        """return the revision recorded for the given subrepo path"""
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        # deprecated in favor of orphan()
        msg = ("'context.unstable' is deprecated, "
               "use 'context.orphan'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.orphan()

    def orphan(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def bumped(self):
        # deprecated in favor of phasedivergent()
        msg = ("'context.bumped' is deprecated, "
               "use 'context.phasedivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.phasedivergent()

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def divergent(self):
        # deprecated in favor of contentdivergent()
        msg = ("'context.divergent' is deprecated, "
               "use 'context.contentdivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.contentdivergent()

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def troubled(self):
        # deprecated in favor of isunstable()
        msg = ("'context.troubled' is deprecated, "
               "use 'context.isunstable'")
        self._repo.ui.deprecwarn(msg, '4.4')
        # Delegate to the new API directly; going through the deprecated
        # unstable() would emit a second, misleading deprecation warning.
        return self.isunstable()

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def troubles(self):
        """Keep the old version around in order to avoid breaking extensions
        about different return values.
        """
        msg = ("'context.troubles' is deprecated, "
               "use 'context.instabilities'")
        self._repo.ui.deprecwarn(msg, '4.4')

        troubles = []
        if self.orphan():
            troubles.append('orphan')
        if self.phasedivergent():
            troubles.append('bumped')
        if self.contentdivergent():
            troubles.append('divergent')
        return troubles

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """return (filenode, flags) for path, raising ManifestLookupError
        when the file is not present in this changeset"""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """return a matcher rooted at this repo for the given patterns"""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
426 426
def _filterederror(repo, changeid):
    """Build the exception to raise for access to a filtered changeid.

    Kept as a standalone function so extensions (e.g. evolve) can
    experiment with various message variants.
    """
    filtername = repo.filtername
    if not filtername.startswith('visible'):
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, filtername)
        return error.FilteredRepoLookupError(msg)
    msg = _("hidden revision '%s'") % changeid
    hint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
439 439
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # basectx.__new__ already returned the existing context object when
        # changeid was itself a context, so there is nothing left to set up.
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            # fast path: plain integer revision number
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # looks like a binary nodeid
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                # decimal revision string, possibly negative (counted from tip)
                num = int(changeid)
                if '%d' % num != changeid:
                    raise ValueError
                nrevs = len(repo.changelog)
                if num < 0:
                    num += nrevs
                if num < 0 or num >= nrevs and num != wdirrev:
                    raise ValueError
                self._rev = num
                self._node = repo.changelog.node(num)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # looks like a full hexadecimal nodeid
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # last resort: a unique hex nodeid prefix
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # only the null revision is falsy
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            # single parent: do not materialize a null second parent
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        """return the raw changeset fields as a tuple"""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    cand = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = cand.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        badmatcher = matchmod.badmatch(match, bad)
        return self._manifest.walk(badmatcher)

    def matches(self, match):
        return self.walk(match)
700 700
701 701 class basefilectx(object):
702 702 """A filecontext object represents the common logic for its children:
703 703 filectx: read-only access to a filerevision that is already present
704 704 in the repo,
705 705 workingfilectx: a filecontext that represents files from the working
706 706 directory,
707 707 memfilectx: a filecontext that represents files in-memory,
708 708 overlayfilectx: duplicate another filecontext with some fields overridden.
709 709 """
710 710 @propertycache
711 711 def _filelog(self):
712 712 return self._repo.file(self._path)
713 713
714 714 @propertycache
715 715 def _changeid(self):
716 716 if r'_changeid' in self.__dict__:
717 717 return self._changeid
718 718 elif r'_changectx' in self.__dict__:
719 719 return self._changectx.rev()
720 720 elif r'_descendantrev' in self.__dict__:
721 721 # this file context was created from a revision with a known
722 722 # descendant, we can (lazily) correct for linkrev aliases
723 723 return self._adjustlinkrev(self._descendantrev)
724 724 else:
725 725 return self._filelog.linkrev(self._filerev)
726 726
727 727 @propertycache
728 728 def _filenode(self):
729 729 if r'_fileid' in self.__dict__:
730 730 return self._filelog.lookup(self._fileid)
731 731 else:
732 732 return self._changectx.filenode(self._path)
733 733
734 734 @propertycache
735 735 def _filerev(self):
736 736 return self._filelog.rev(self._filenode)
737 737
738 738 @propertycache
739 739 def _repopath(self):
740 740 return self._path
741 741
742 742 def __nonzero__(self):
743 743 try:
744 744 self._filenode
745 745 return True
746 746 except error.LookupError:
747 747 # file is missing
748 748 return False
749 749
750 750 __bool__ = __nonzero__
751 751
752 752 def __bytes__(self):
753 753 try:
754 754 return "%s@%s" % (self.path(), self._changectx)
755 755 except error.LookupError:
756 756 return "%s@???" % self.path()
757 757
758 758 __str__ = encoding.strmethod(__bytes__)
759 759
760 760 def __repr__(self):
761 761 return "<%s %s>" % (type(self).__name__, str(self))
762 762
763 763 def __hash__(self):
764 764 try:
765 765 return hash((self._path, self._filenode))
766 766 except AttributeError:
767 767 return id(self)
768 768
769 769 def __eq__(self, other):
770 770 try:
771 771 return (type(self) == type(other) and self._path == other._path
772 772 and self._filenode == other._filenode)
773 773 except AttributeError:
774 774 return False
775 775
776 776 def __ne__(self, other):
777 777 return not (self == other)
778 778
779 779 def filerev(self):
780 780 return self._filerev
781 781 def filenode(self):
782 782 return self._filenode
783 783 @propertycache
784 784 def _flags(self):
785 785 return self._changectx.flags(self._path)
786 786 def flags(self):
787 787 return self._flags
788 788 def filelog(self):
789 789 return self._filelog
790 790 def rev(self):
791 791 return self._changeid
792 792 def linkrev(self):
793 793 return self._filelog.linkrev(self._filerev)
794 794 def node(self):
795 795 return self._changectx.node()
796 796 def hex(self):
797 797 return self._changectx.hex()
798 798 def user(self):
799 799 return self._changectx.user()
800 800 def date(self):
801 801 return self._changectx.date()
802 802 def files(self):
803 803 return self._changectx.files()
804 804 def description(self):
805 805 return self._changectx.description()
806 806 def branch(self):
807 807 return self._changectx.branch()
808 808 def extra(self):
809 809 return self._changectx.extra()
810 810 def phase(self):
811 811 return self._changectx.phase()
812 812 def phasestr(self):
813 813 return self._changectx.phasestr()
814 814 def manifest(self):
815 815 return self._changectx.manifest()
816 816 def changectx(self):
817 817 return self._changectx
818 818 def renamed(self):
819 819 return self._copied
820 820 def repo(self):
821 821 return self._repo
822 822 def size(self):
823 823 return len(self.data())
824 824
825 825 def path(self):
826 826 return self._path
827 827
828 828 def isbinary(self):
829 829 try:
830 830 return util.binary(self.data())
831 831 except IOError:
832 832 return False
833 833 def isexec(self):
834 834 return 'x' in self.flags()
835 835 def islink(self):
836 836 return 'l' in self.flags()
837 837
838 838 def isabsent(self):
839 839 """whether this filectx represents a file not in self._changectx
840 840
841 841 This is mainly for merge code to detect change/delete conflicts. This is
842 842 expected to be True for all subclasses of basectx."""
843 843 return False
844 844
845 845 _customcmp = False
846 846 def cmp(self, fctx):
847 847 """compare with other file context
848 848
849 849 returns True if different than fctx.
850 850 """
851 851 if fctx._customcmp:
852 852 return fctx.cmp(self)
853 853
854 854 if (fctx._filenode is None
855 855 and (self._repo._encodefilterpats
856 856 # if file data starts with '\1\n', empty metadata block is
857 857 # prepended, which adds 4 bytes to filelog.size().
858 858 or self.size() - 4 == fctx.size())
859 859 or self.size() == fctx.size()):
860 860 return self._filelog.cmp(self._filenode, fctx.data())
861 861
862 862 return True
863 863
864 864 def _adjustlinkrev(self, srcrev, inclusive=False):
865 865 """return the first ancestor of <srcrev> introducing <fnode>
866 866
867 867 If the linkrev of the file revision does not point to an ancestor of
868 868 srcrev, we'll walk down the ancestors until we find one introducing
869 869 this file revision.
870 870
871 871 :srcrev: the changeset revision we search ancestors from
872 872 :inclusive: if true, the src revision will also be checked
873 873 """
874 874 repo = self._repo
875 875 cl = repo.unfiltered().changelog
876 876 mfl = repo.manifestlog
877 877 # fetch the linkrev
878 878 lkr = self.linkrev()
879 879 # hack to reuse ancestor computation when searching for renames
880 880 memberanc = getattr(self, '_ancestrycontext', None)
881 881 iteranc = None
882 882 if srcrev is None:
883 883 # wctx case, used by workingfilectx during mergecopy
884 884 revs = [p.rev() for p in self._repo[None].parents()]
885 885 inclusive = True # we skipped the real (revless) source
886 886 else:
887 887 revs = [srcrev]
888 888 if memberanc is None:
889 889 memberanc = iteranc = cl.ancestors(revs, lkr,
890 890 inclusive=inclusive)
891 891 # check if this linkrev is an ancestor of srcrev
892 892 if lkr not in memberanc:
893 893 if iteranc is None:
894 894 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
895 895 fnode = self._filenode
896 896 path = self._path
897 897 for a in iteranc:
898 898 ac = cl.read(a) # get changeset data (we avoid object creation)
899 899 if path in ac[3]: # checking the 'files' field.
900 900 # The file has been touched, check if the content is
901 901 # similar to the one we search for.
902 902 if fnode == mfl[ac[0]].readfast().get(path):
903 903 return a
904 904 # In theory, we should never get out of that loop without a result.
905 905 # But if manifest uses a buggy file revision (not children of the
906 906 # one it replaces) we could. Such a buggy situation will likely
907 907 # result is crash somewhere else at to some point.
908 908 return lkr
909 909
910 910 def introrev(self):
911 911 """return the rev of the changeset which introduced this file revision
912 912
913 913 This method is different from linkrev because it take into account the
914 914 changeset the filectx was created from. It ensures the returned
915 915 revision is one of its ancestors. This prevents bugs from
916 916 'linkrev-shadowing' when a file revision is used by multiple
917 917 changesets.
918 918 """
919 919 lkr = self.linkrev()
920 920 attrs = vars(self)
921 921 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
922 922 if noctx or self.rev() == lkr:
923 923 return self.linkrev()
924 924 return self._adjustlinkrev(self.rev(), inclusive=True)
925 925
926 926 def _parentfilectx(self, path, fileid, filelog):
927 927 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
928 928 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
929 929 if '_changeid' in vars(self) or '_changectx' in vars(self):
930 930 # If self is associated with a changeset (probably explicitly
931 931 # fed), ensure the created filectx is associated with a
932 932 # changeset that is an ancestor of self.changectx.
933 933 # This lets us later use _adjustlinkrev to get a correct link.
934 934 fctx._descendantrev = self.rev()
935 935 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
936 936 elif '_descendantrev' in vars(self):
937 937 # Otherwise propagate _descendantrev if we have one associated.
938 938 fctx._descendantrev = self._descendantrev
939 939 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
940 940 return fctx
941 941
942 942 def parents(self):
943 943 _path = self._path
944 944 fl = self._filelog
945 945 parents = self._filelog.parents(self._filenode)
946 946 pl = [(_path, node, fl) for node in parents if node != nullid]
947 947
948 948 r = fl.renamed(self._filenode)
949 949 if r:
950 950 # - In the simple rename case, both parent are nullid, pl is empty.
951 951 # - In case of merge, only one of the parent is null id and should
952 952 # be replaced with the rename information. This parent is -always-
953 953 # the first one.
954 954 #
955 955 # As null id have always been filtered out in the previous list
956 956 # comprehension, inserting to 0 will always result in "replacing
957 957 # first nullid parent with rename information.
958 958 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
959 959
960 960 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
961 961
    def p1(self):
        # first parent filectx (always present)
        return self.parents()[0]
964 964
965 965 def p2(self):
966 966 p = self.parents()
967 967 if len(p) == 2:
968 968 return p[1]
969 969 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
970 970
    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.
        '''

        # number of logical lines in 'text'; a final line without a
        # trailing newline still counts as one line
        def lines(text):
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        # decorate() pairs every line of 'text' with its blame origin
        if linenumber:
            def decorate(text, rev):
                return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * lines(text), text)

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        # pcache: filectx -> list of parent filectxs
        # needed: reference count of how many children still need each entry
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            # only annotate f once all of its parents have been annotated
            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                # drop parent annotations once their last child consumed
                # them, to bound memory usage
                for p in pl:
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

            hist[f] = curr
            del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))
1083 1083
1084 1084 def ancestors(self, followfirst=False):
1085 1085 visit = {}
1086 1086 c = self
1087 1087 if followfirst:
1088 1088 cut = 1
1089 1089 else:
1090 1090 cut = None
1091 1091
1092 1092 while True:
1093 1093 for parent in c.parents()[:cut]:
1094 1094 visit[(parent.linkrev(), parent.filenode())] = parent
1095 1095 if not visit:
1096 1096 break
1097 1097 c = visit.pop(max(visit))
1098 1098 yield c
1099 1099
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    >>> oldfctx = 'old'
    >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
    >>> olddata = 'a\nb\n'
    >>> p1data = 'a\nb\nc\n'
    >>> p2data = 'a\nc\nd\n'
    >>> childdata = 'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    # compute the diff blocks between each parent and the child once
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    # parent hunk is at least as long: map child lines 1:1
                    for bk in xrange(b1, b2):
                        if child[0][bk][0] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = parent[0][ak]
                else:
                    # parent hunk is shorter: postpone to the second pass
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
    return child
1209 1209
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be given
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only pre-seed the attributes that were explicitly provided; the
        # rest is derived lazily through propertycache
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # revision data straight from the revlog (raw=True)
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, aborting on censored nodes unless the
        censor.policy config says to ignore them."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # size as recorded by the filelog
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # the rename was recorded in an older changeset: only report a
        # copy if neither parent already holds this exact file revision
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1315 1315
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        """text is the commit message; user/date/changes, when omitted,
        are computed lazily through propertycache."""
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        # an empty branch name means the default branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # default: full status of the repository
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin the commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    # simple accessors over the (possibly lazily computed) commit data
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        # union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer the manifest when it has already been built
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # parents first, then the rest of the ancestry from the changelog
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1512 1512
1513 1513 class workingctx(committablectx):
1514 1514 """A workingctx object makes access to data related to
1515 1515 the current working directory convenient.
1516 1516 date - any valid date string or (unixtime, offset), or None.
1517 1517 user - username string, or None.
1518 1518 extra - a dictionary of extra values, or None.
1519 1519 changes - a list of file lists as returned by localrepo.status()
1520 1520 or None to use the repository status.
1521 1521 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # all state handling is delegated to committablectx
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1525 1525
1526 1526 def __iter__(self):
1527 1527 d = self._repo.dirstate
1528 1528 for f in d:
1529 1529 if d[f] != 'r':
1530 1530 yield f
1531 1531
1532 1532 def __contains__(self, key):
1533 1533 return self._repo.dirstate[key] not in "?r"
1534 1534
    def hex(self):
        # the working directory is identified by the special wdirid hash
        return hex(wdirid)
1537 1537
1538 1538 @propertycache
1539 1539 def _parents(self):
1540 1540 p = self._repo.dirstate.parents()
1541 1541 if p[1] == nullid:
1542 1542 p = p[:-1]
1543 1543 return [changectx(self._repo, x) for x in p]
1544 1544
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1549 1549
1550 1550 def dirty(self, missing=False, merge=True, branch=True):
1551 1551 "check whether a working directory is modified"
1552 1552 # check subrepos first
1553 1553 for s in sorted(self.substate):
1554 1554 if self.sub(s).dirty(missing=missing):
1555 1555 return True
1556 1556 # check current working dir
1557 1557 return ((merge and self.p2()) or
1558 1558 (branch and self.branch() != self.p1().branch()) or
1559 1559 self.modified() or self.added() or self.removed() or
1560 1560 (missing and self.deleted()))
1561 1561
    def add(self, list, prefix=""):
        """Schedule the files in 'list' for addition; return the rejected
        ones.

        Files that do not exist, or are neither regular files nor
        symlinks, produce a warning and end up in the rejected list.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                # warn (but do not reject) for very large files
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, modified or normal: nothing to do
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect rather than re-add
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1596 1596
1597 1597 def forget(self, files, prefix=""):
1598 1598 with self._repo.wlock():
1599 1599 ds = self._repo.dirstate
1600 1600 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1601 1601 rejected = []
1602 1602 for f in files:
1603 1603 if f not in self._repo.dirstate:
1604 1604 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1605 1605 rejected.append(f)
1606 1606 elif self._repo.dirstate[f] != 'a':
1607 1607 self._repo.dirstate.remove(f)
1608 1608 else:
1609 1609 self._repo.dirstate.drop(f)
1610 1610 return rejected
1611 1611
1612 1612 def undelete(self, list):
1613 1613 pctxs = self.parents()
1614 1614 with self._repo.wlock():
1615 1615 ds = self._repo.dirstate
1616 1616 for f in list:
1617 1617 if self._repo.dirstate[f] != 'r':
1618 1618 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1619 1619 else:
1620 1620 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1621 1621 t = fctx.data()
1622 1622 self._repo.wwrite(f, t, fctx.flags())
1623 1623 self._repo.dirstate.normal(f)
1624 1624
1625 1625 def copy(self, source, dest):
1626 1626 try:
1627 1627 st = self._repo.wvfs.lstat(dest)
1628 1628 except OSError as err:
1629 1629 if err.errno != errno.ENOENT:
1630 1630 raise
1631 1631 self._repo.ui.warn(_("%s does not exist!\n")
1632 1632 % self._repo.dirstate.pathto(dest))
1633 1633 return
1634 1634 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1635 1635 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1636 1636 "symbolic link\n")
1637 1637 % self._repo.dirstate.pathto(dest))
1638 1638 else:
1639 1639 with self._repo.wlock():
1640 1640 if self._repo.dirstate[dest] in '?':
1641 1641 self._repo.dirstate.add(dest)
1642 1642 elif self._repo.dirstate[dest] in 'r':
1643 1643 self._repo.dirstate.normallookup(dest)
1644 1644 self._repo.dirstate.copy(source, dest)
1645 1645
1646 1646 def match(self, pats=None, include=None, exclude=None, default='glob',
1647 1647 listsubrepos=False, badfn=None):
1648 1648 r = self._repo
1649 1649
1650 1650 # Only a case insensitive filesystem needs magic to translate user input
1651 1651 # to actual case in the filesystem.
1652 1652 icasefs = not util.fscasesensitive(r.root)
1653 1653 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1654 1654 default, auditor=r.auditor, ctx=self,
1655 1655 listsubrepos=listsubrepos, badfn=badfn,
1656 1656 icasefs=icasefs)
1657 1657
1658 1658 def _filtersuspectsymlink(self, files):
1659 1659 if not files or self._repo.dirstate._checklink:
1660 1660 return files
1661 1661
1662 1662 # Symlink placeholders may get non-symlink-like contents
1663 1663 # via user error or dereferencing by NFS or Samba servers,
1664 1664 # so we filter out any placeholders that don't look like a
1665 1665 # symlink
1666 1666 sane = []
1667 1667 for f in files:
1668 1668 if self.flags(f) == 'l':
1669 1669 d = self[f].data()
1670 1670 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1671 1671 self._repo.ui.debug('ignoring suspect symlink placeholder'
1672 1672 ' "%s"\n' % f)
1673 1673 continue
1674 1674 sane.append(f)
1675 1675 return sane
1676 1676
    def _checklookup(self, files):
        """Recheck files whose dirstate status was ambiguous.

        Returns a (modified, deleted, fixup) triple of lists: 'modified'
        files really changed, 'deleted' files became unreadable, and
        'fixup' files turned out to be clean and can be marked normal.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1706 1706
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        Also runs any registered post-dirstate-status callbacks. Both
        steps are best-effort: failure to take the wlock is ignored.
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                # remember the dirstate identity to detect concurrent changes
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                # best-effort only: skip the fixup if the lock is contended
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1746 1746
    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.

        Files the dirstate could not decide on are recompared against the
        parent; confirmed-clean ones are written back via
        _poststatusfixup(). The result may be cached on self._status.
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and listclean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1780 1780
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1791 1791
1792 1792 def _buildstatusmanifest(self, status):
1793 1793 """Builds a manifest that includes the given status results."""
1794 1794 parents = self.parents()
1795 1795
1796 1796 man = parents[0].manifest().copy()
1797 1797
1798 1798 ff = self._flagfunc
1799 1799 for i, l in ((addednodeid, status.added),
1800 1800 (modifiednodeid, status.modified)):
1801 1801 for f in l:
1802 1802 man[f] = i
1803 1803 try:
1804 1804 man.setflag(f, ff(f))
1805 1805 except OSError:
1806 1806 pass
1807 1807
1808 1808 for f in status.deleted + status.removed:
1809 1809 if f in man:
1810 1810 del man[f]
1811 1811
1812 1812 return man
1813 1813
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # the passed-in 's' is deliberately discarded and recomputed from
        # the dirstate so the result reflects the working directory
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # slow path: fall back to the generic manifest comparison
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1833 1833
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            # patch the matcher in place; the (possibly shared) match
            # object returned to callers carries the custom 'bad' callback
            match.bad = bad
        return match
1855 1855
    def markcommitted(self, node):
        # extend the base-class post-commit handling with sparse-checkout
        # bookkeeping for the newly committed node
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1860 1860
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        # minimal eager state; filelog and changectx are only pinned when
        # supplied so subclasses can compute them lazily via propertycache
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents

        def nodein(ctx):
            # filenode of this path in the given changectx's manifest
            return ctx._manifest.get(path, nullid)

        renamed = self.renamed()
        if renamed:
            # first parent is the copy source; its filelog is resolved later
            entries = [renamed + (None,)]
        else:
            entries = [(path, nodein(parentctxs[0]), filelog)]
        entries.extend((path, nodein(pctx), filelog)
                       for pctx in parentctxs[1:])

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in entries if n != nullid]

    def children(self):
        # uncommitted contexts have no committed descendants
        return []
1907 1907
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working-directory context
        return workingctx(self._repo)

    def data(self):
        # raw file contents read from the working directory
        return self._repo.wread(self._path)
    def renamed(self):
        # (copy source, filenode of the source in p1) or None if not copied
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        # prefer the on-disk mtime; fall back to the changeset date when
        # the file no longer exists in the working directory
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # like exists(), but does not follow a final symlink
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # path audit (rejects paths escaping the repo, .hg/ writes, etc.)
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def setflags(self, l, x):
        # l: symlink flag, x: executable flag
        self._repo.wvfs.setflags(self._path, l, x)
1966 1966
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # deliberately skip workingctx.__init__ and call its base class
        # directly, handing over the precomputed 'changes' status
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            # everything managed but untouched by this commit is "clean"
            cleanfiles = [f for f in self._manifest
                          if f not in self._changedset]
        else:
            cleanfiles = []
        base = self._status
        modified = [f for f in base.modified if match(f)]
        added = [f for f in base.added if match(f)]
        removed = [f for f in base.removed if match(f)]
        return scmutil.status(modified, added, removed,
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        status = self._status
        return set(status.modified) | set(status.added) | set(status.removed)
2004 2004
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: hit the cache first, compute and remember on a miss
        try:
            return cache[path]
        except KeyError:
            result = func(repo, memctx, path)
            cache[path] = result
            return result

    return getfilectx
2020 2020
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        renamed = fctx.renamed()
        copied = renamed[0] if renamed else renamed
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied, memctx=memctx)

    return getfilectx
2039 2039
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # the patch removes this file
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink,
                          isexec=isexec, copied=copied,
                          memctx=memctx)

    return getfilectx
2055 2055
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # normalize missing parents (None) to the null node
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        # accept a patch store or any non-callable context-like object and
        # adapt it to the filectxfn callback protocol
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no filelog parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: file exists in this rev
                modified.append(f)
            else:
                # filectxfn returned None: file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2178 2178
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # build the flag string: 'l' for symlink, 'x' for executable
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        self._copied = (copied, nullid) if copied else None

    def data(self):
        # in-memory file content supplied at construction
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
2211 2211
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # ctx not overridden -> trivially matches the original
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr in attrs:
                if util.safehasattr(originalfctx, attr):
                    setattr(self, attr, getattr(originalfctx, attr))

    def data(self):
        # lazily evaluated: either the override or originalfctx.data
        return self._datafunc()
2282 2282
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        # normalize missing parents (None) to the null node
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): 'p1'/'p2' are changectx objects, so comparing them
        # to the 'nullid' byte string is always "not equal" and the
        # manifestnode comparison also runs for null parents; presumably
        # 'p1.node() != nullid' was intended -- confirm before relying on
        # the null-parent short-circuit.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # node of the manifest being reused from the original ctx
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file data comes straight from the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # file context available: file exists in this revision
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
General Comments 0
You need to be logged in to leave comments. Login now