context: rename unstable into orphan...
Boris Feld
r33693:f163edb4 default
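
For extension authors, the practical effect of this change is that `context.unstable()` now emits a deprecation warning and forwards to the new `context.orphan()`; the diff below also shows `context.troubles()` deprecated in favor of `context.instabilities()`, which reports 'orphan', 'phase-divergent' and 'content-divergent'. A minimal compatibility sketch (not part of this commit; the helper names and the `getattr` fallback are illustrative assumptions):

    # Sketch only: assumes `ctx` is a changectx, e.g. ctx = repo['.'].
    def isorphan(ctx):
        # Mercurial with this change exposes orphan(); older releases only
        # have unstable(), which still works here but warns via deprecwarn.
        orphan = getattr(ctx, 'orphan', None)
        return orphan() if orphan is not None else ctx.unstable()

    def instabilitynames(ctx):
        # instabilities() returns a subset of ['orphan', 'phase-divergent',
        # 'content-divergent']; troubles() is the deprecated spelling.
        getter = getattr(ctx, 'instabilities', None)
        return getter() if getter is not None else ctx.troubles()

Calling `ctx.unstable()` directly still works after this commit, but it goes through `ui.deprecwarn(msg, '4.4')` before delegating to `orphan()`, as the hunk below shows.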
@@ -1,2347 +1,2353 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 wdirrev,
27 27 )
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 mdiff,
34 34 obsolete as obsmod,
35 35 patch,
36 36 pathutil,
37 37 phases,
38 38 pycompat,
39 39 repoview,
40 40 revlog,
41 41 scmutil,
42 42 sparse,
43 43 subrepo,
44 44 util,
45 45 )
46 46
47 47 propertycache = util.propertycache
48 48
49 49 nonascii = re.compile(r'[^\x21-\x7f]').search
50 50
51 51 class basectx(object):
52 52 """A basectx object represents the common logic for its children:
53 53 changectx: read-only context that is already present in the repo,
54 54 workingctx: a context that represents the working directory and can
55 55 be committed,
56 56 memctx: a context that represents changes in-memory and can also
57 57 be committed."""
58 58 def __new__(cls, repo, changeid='', *args, **kwargs):
59 59 if isinstance(changeid, basectx):
60 60 return changeid
61 61
62 62 o = super(basectx, cls).__new__(cls)
63 63
64 64 o._repo = repo
65 65 o._rev = nullrev
66 66 o._node = nullid
67 67
68 68 return o
69 69
70 70 def __bytes__(self):
71 71 return short(self.node())
72 72
73 73 __str__ = encoding.strmethod(__bytes__)
74 74
75 75 def __int__(self):
76 76 return self.rev()
77 77
78 78 def __repr__(self):
79 79 return r"<%s %s>" % (type(self).__name__, str(self))
80 80
81 81 def __eq__(self, other):
82 82 try:
83 83 return type(self) == type(other) and self._rev == other._rev
84 84 except AttributeError:
85 85 return False
86 86
87 87 def __ne__(self, other):
88 88 return not (self == other)
89 89
90 90 def __contains__(self, key):
91 91 return key in self._manifest
92 92
93 93 def __getitem__(self, key):
94 94 return self.filectx(key)
95 95
96 96 def __iter__(self):
97 97 return iter(self._manifest)
98 98
99 99 def _buildstatusmanifest(self, status):
100 100 """Builds a manifest that includes the given status results, if this is
101 101 a working copy context. For non-working copy contexts, it just returns
102 102 the normal manifest."""
103 103 return self.manifest()
104 104
105 105 def _matchstatus(self, other, match):
106 106 """return match.always if match is none
107 107
108 108 This internal method provides a way for child objects to override the
109 109 match operator.
110 110 """
111 111 return match or matchmod.always(self._repo.root, self._repo.getcwd())
112 112
113 113 def _buildstatus(self, other, s, match, listignored, listclean,
114 114 listunknown):
115 115 """build a status with respect to another context"""
116 116 # Load earliest manifest first for caching reasons. More specifically,
117 117 # if you have revisions 1000 and 1001, 1001 is probably stored as a
118 118 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
119 119 # 1000 and cache it so that when you read 1001, we just need to apply a
120 120 # delta to what's in the cache. So that's one full reconstruction + one
121 121 # delta application.
122 122 mf2 = None
123 123 if self.rev() is not None and self.rev() < other.rev():
124 124 mf2 = self._buildstatusmanifest(s)
125 125 mf1 = other._buildstatusmanifest(s)
126 126 if mf2 is None:
127 127 mf2 = self._buildstatusmanifest(s)
128 128
129 129 modified, added = [], []
130 130 removed = []
131 131 clean = []
132 132 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
133 133 deletedset = set(deleted)
134 134 d = mf1.diff(mf2, match=match, clean=listclean)
135 135 for fn, value in d.iteritems():
136 136 if fn in deletedset:
137 137 continue
138 138 if value is None:
139 139 clean.append(fn)
140 140 continue
141 141 (node1, flag1), (node2, flag2) = value
142 142 if node1 is None:
143 143 added.append(fn)
144 144 elif node2 is None:
145 145 removed.append(fn)
146 146 elif flag1 != flag2:
147 147 modified.append(fn)
148 148 elif node2 not in wdirnodes:
149 149 # When comparing files between two commits, we save time by
150 150 # not comparing the file contents when the nodeids differ.
151 151 # Note that this means we incorrectly report a reverted change
152 152 # to a file as a modification.
153 153 modified.append(fn)
154 154 elif self[fn].cmp(other[fn]):
155 155 modified.append(fn)
156 156 else:
157 157 clean.append(fn)
158 158
159 159 if removed:
160 160 # need to filter files if they are already reported as removed
161 161 unknown = [fn for fn in unknown if fn not in mf1 and
162 162 (not match or match(fn))]
163 163 ignored = [fn for fn in ignored if fn not in mf1 and
164 164 (not match or match(fn))]
165 165 # if they're deleted, don't report them as removed
166 166 removed = [fn for fn in removed if fn not in deletedset]
167 167
168 168 return scmutil.status(modified, added, removed, deleted, unknown,
169 169 ignored, clean)
170 170
171 171 @propertycache
172 172 def substate(self):
173 173 return subrepo.state(self, self._repo.ui)
174 174
175 175 def subrev(self, subpath):
176 176 return self.substate[subpath][1]
177 177
178 178 def rev(self):
179 179 return self._rev
180 180 def node(self):
181 181 return self._node
182 182 def hex(self):
183 183 return hex(self.node())
184 184 def manifest(self):
185 185 return self._manifest
186 186 def manifestctx(self):
187 187 return self._manifestctx
188 188 def repo(self):
189 189 return self._repo
190 190 def phasestr(self):
191 191 return phases.phasenames[self.phase()]
192 192 def mutable(self):
193 193 return self.phase() > phases.public
194 194
195 195 def getfileset(self, expr):
196 196 return fileset.getfileset(self, expr)
197 197
198 198 def obsolete(self):
199 199 """True if the changeset is obsolete"""
200 200 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
201 201
202 202 def extinct(self):
203 203 """True if the changeset is extinct"""
204 204 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
205 205
206 206 def unstable(self):
207 msg = ("'context.unstable' is deprecated, "
208 "use 'context.orphan'")
209 self._repo.ui.deprecwarn(msg, '4.4')
210 return self.orphan()
211
212 def orphan(self):
207 213 """True if the changeset is not obsolete but it's ancestor are"""
208 214 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
209 215
210 216 def bumped(self):
211 217 """True if the changeset try to be a successor of a public changeset
212 218
213 219 Only non-public and non-obsolete changesets may be bumped.
214 220 """
215 221 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
216 222
217 223 def divergent(self):
218 224 """Is a successors of a changeset with multiple possible successors set
219 225
220 226 Only non-public and non-obsolete changesets may be divergent.
221 227 """
222 228 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
223 229
224 230 def troubled(self):
225 231 """True if the changeset is either unstable, bumped or divergent"""
226 return self.unstable() or self.bumped() or self.divergent()
232 return self.orphan() or self.bumped() or self.divergent()
227 233
228 234 def troubles(self):
229 235 """Keep the old version around in order to avoid breaking extensions
230 236 that rely on the old return values.
231 237 """
232 238 msg = ("'context.troubles' is deprecated, "
233 239 "use 'context.instabilities'")
234 240 self._repo.ui.deprecwarn(msg, '4.4')
235 241
236 242 troubles = []
237 if self.unstable():
243 if self.orphan():
238 244 troubles.append('orphan')
239 245 if self.bumped():
240 246 troubles.append('bumped')
241 247 if self.divergent():
242 248 troubles.append('divergent')
243 249 return troubles
244 250
245 251 def instabilities(self):
246 252 """return the list of instabilities affecting this changeset.
247 253
248 254 Instabilities are returned as strings. Possible values are:
249 255 - orphan,
250 256 - phase-divergent,
251 257 - content-divergent.
252 258 """
253 259 instabilities = []
254 if self.unstable():
260 if self.orphan():
255 261 instabilities.append('orphan')
256 262 if self.bumped():
257 263 instabilities.append('phase-divergent')
258 264 if self.divergent():
259 265 instabilities.append('content-divergent')
260 266 return instabilities
261 267
262 268 def parents(self):
263 269 """return contexts for each parent changeset"""
264 270 return self._parents
265 271
266 272 def p1(self):
267 273 return self._parents[0]
268 274
269 275 def p2(self):
270 276 parents = self._parents
271 277 if len(parents) == 2:
272 278 return parents[1]
273 279 return changectx(self._repo, nullrev)
274 280
275 281 def _fileinfo(self, path):
276 282 if r'_manifest' in self.__dict__:
277 283 try:
278 284 return self._manifest[path], self._manifest.flags(path)
279 285 except KeyError:
280 286 raise error.ManifestLookupError(self._node, path,
281 287 _('not found in manifest'))
282 288 if r'_manifestdelta' in self.__dict__ or path in self.files():
283 289 if path in self._manifestdelta:
284 290 return (self._manifestdelta[path],
285 291 self._manifestdelta.flags(path))
286 292 mfl = self._repo.manifestlog
287 293 try:
288 294 node, flag = mfl[self._changeset.manifest].find(path)
289 295 except KeyError:
290 296 raise error.ManifestLookupError(self._node, path,
291 297 _('not found in manifest'))
292 298
293 299 return node, flag
294 300
295 301 def filenode(self, path):
296 302 return self._fileinfo(path)[0]
297 303
298 304 def flags(self, path):
299 305 try:
300 306 return self._fileinfo(path)[1]
301 307 except error.LookupError:
302 308 return ''
303 309
304 310 def sub(self, path, allowcreate=True):
305 311 '''return a subrepo for the stored revision of path, never wdir()'''
306 312 return subrepo.subrepo(self, path, allowcreate=allowcreate)
307 313
308 314 def nullsub(self, path, pctx):
309 315 return subrepo.nullsubrepo(self, path, pctx)
310 316
311 317 def workingsub(self, path):
312 318 '''return a subrepo for the stored revision, or wdir if this is a wdir
313 319 context.
314 320 '''
315 321 return subrepo.subrepo(self, path, allowwdir=True)
316 322
317 323 def match(self, pats=None, include=None, exclude=None, default='glob',
318 324 listsubrepos=False, badfn=None):
319 325 r = self._repo
320 326 return matchmod.match(r.root, r.getcwd(), pats,
321 327 include, exclude, default,
322 328 auditor=r.nofsauditor, ctx=self,
323 329 listsubrepos=listsubrepos, badfn=badfn)
324 330
325 331 def diff(self, ctx2=None, match=None, **opts):
326 332 """Returns a diff generator for the given contexts and matcher"""
327 333 if ctx2 is None:
328 334 ctx2 = self.p1()
329 335 if ctx2 is not None:
330 336 ctx2 = self._repo[ctx2]
331 337 diffopts = patch.diffopts(self._repo.ui, opts)
332 338 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
333 339
334 340 def dirs(self):
335 341 return self._manifest.dirs()
336 342
337 343 def hasdir(self, dir):
338 344 return self._manifest.hasdir(dir)
339 345
340 346 def status(self, other=None, match=None, listignored=False,
341 347 listclean=False, listunknown=False, listsubrepos=False):
342 348 """return status of files between two nodes or node and working
343 349 directory.
344 350
345 351 If other is None, compare this node with working directory.
346 352
347 353 returns (modified, added, removed, deleted, unknown, ignored, clean)
348 354 """
349 355
350 356 ctx1 = self
351 357 ctx2 = self._repo[other]
352 358
353 359 # This next code block is, admittedly, fragile logic that tests for
354 360 # reversing the contexts and wouldn't need to exist if it weren't for
355 361 # the fast (and common) code path of comparing the working directory
356 362 # with its first parent.
357 363 #
358 364 # What we're aiming for here is the ability to call:
359 365 #
360 366 # workingctx.status(parentctx)
361 367 #
362 368 # If we always built the manifest for each context and compared those,
363 369 # then we'd be done. But the special case of the above call means we
364 370 # just copy the manifest of the parent.
365 371 reversed = False
366 372 if (not isinstance(ctx1, changectx)
367 373 and isinstance(ctx2, changectx)):
368 374 reversed = True
369 375 ctx1, ctx2 = ctx2, ctx1
370 376
371 377 match = ctx2._matchstatus(ctx1, match)
372 378 r = scmutil.status([], [], [], [], [], [], [])
373 379 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
374 380 listunknown)
375 381
376 382 if reversed:
377 383 # Reverse added and removed. Clear deleted, unknown and ignored as
378 384 # these make no sense to reverse.
379 385 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
380 386 r.clean)
381 387
382 388 if listsubrepos:
383 389 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
384 390 try:
385 391 rev2 = ctx2.subrev(subpath)
386 392 except KeyError:
387 393 # A subrepo that existed in node1 was deleted between
388 394 # node1 and node2 (inclusive). Thus, ctx2's substate
389 395 # won't contain that subpath. The best we can do is ignore it.
390 396 rev2 = None
391 397 submatch = matchmod.subdirmatcher(subpath, match)
392 398 s = sub.status(rev2, match=submatch, ignored=listignored,
393 399 clean=listclean, unknown=listunknown,
394 400 listsubrepos=True)
395 401 for rfiles, sfiles in zip(r, s):
396 402 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
397 403
398 404 for l in r:
399 405 l.sort()
400 406
401 407 return r
402 408
403 409 def _filterederror(repo, changeid):
404 410 """build an exception to be raised about a filtered changeid
405 411
406 412 This is extracted into a function to help extensions (e.g. evolve)
407 413 experiment with various message variants."""
408 414 if repo.filtername.startswith('visible'):
409 415 msg = _("hidden revision '%s'") % changeid
410 416 hint = _('use --hidden to access hidden revisions')
411 417 return error.FilteredRepoLookupError(msg, hint=hint)
412 418 msg = _("filtered revision '%s' (not in '%s' subset)")
413 419 msg %= (changeid, repo.filtername)
414 420 return error.FilteredRepoLookupError(msg)
415 421
416 422 class changectx(basectx):
417 423 """A changecontext object makes access to data related to a particular
418 424 changeset convenient. It represents a read-only context already present in
419 425 the repo."""
420 426 def __init__(self, repo, changeid=''):
421 427 """changeid is a revision number, node, or tag"""
422 428
423 429 # since basectx.__new__ already took care of copying the object, we
424 430 # don't need to do anything in __init__, so we just exit here
425 431 if isinstance(changeid, basectx):
426 432 return
427 433
428 434 if changeid == '':
429 435 changeid = '.'
430 436 self._repo = repo
431 437
432 438 try:
433 439 if isinstance(changeid, int):
434 440 self._node = repo.changelog.node(changeid)
435 441 self._rev = changeid
436 442 return
437 443 if not pycompat.ispy3 and isinstance(changeid, long):
438 444 changeid = str(changeid)
439 445 if changeid == 'null':
440 446 self._node = nullid
441 447 self._rev = nullrev
442 448 return
443 449 if changeid == 'tip':
444 450 self._node = repo.changelog.tip()
445 451 self._rev = repo.changelog.rev(self._node)
446 452 return
447 453 if changeid == '.' or changeid == repo.dirstate.p1():
448 454 # this is a hack to delay/avoid loading obsmarkers
449 455 # when we know that '.' won't be hidden
450 456 self._node = repo.dirstate.p1()
451 457 self._rev = repo.unfiltered().changelog.rev(self._node)
452 458 return
453 459 if len(changeid) == 20:
454 460 try:
455 461 self._node = changeid
456 462 self._rev = repo.changelog.rev(changeid)
457 463 return
458 464 except error.FilteredRepoLookupError:
459 465 raise
460 466 except LookupError:
461 467 pass
462 468
463 469 try:
464 470 r = int(changeid)
465 471 if '%d' % r != changeid:
466 472 raise ValueError
467 473 l = len(repo.changelog)
468 474 if r < 0:
469 475 r += l
470 476 if r < 0 or r >= l and r != wdirrev:
471 477 raise ValueError
472 478 self._rev = r
473 479 self._node = repo.changelog.node(r)
474 480 return
475 481 except error.FilteredIndexError:
476 482 raise
477 483 except (ValueError, OverflowError, IndexError):
478 484 pass
479 485
480 486 if len(changeid) == 40:
481 487 try:
482 488 self._node = bin(changeid)
483 489 self._rev = repo.changelog.rev(self._node)
484 490 return
485 491 except error.FilteredLookupError:
486 492 raise
487 493 except (TypeError, LookupError):
488 494 pass
489 495
490 496 # lookup bookmarks through the name interface
491 497 try:
492 498 self._node = repo.names.singlenode(repo, changeid)
493 499 self._rev = repo.changelog.rev(self._node)
494 500 return
495 501 except KeyError:
496 502 pass
497 503 except error.FilteredRepoLookupError:
498 504 raise
499 505 except error.RepoLookupError:
500 506 pass
501 507
502 508 self._node = repo.unfiltered().changelog._partialmatch(changeid)
503 509 if self._node is not None:
504 510 self._rev = repo.changelog.rev(self._node)
505 511 return
506 512
507 513 # lookup failed
508 514 # check if it might have come from damaged dirstate
509 515 #
510 516 # XXX we could avoid the unfiltered if we had a recognizable
511 517 # exception for filtered changeset access
512 518 if changeid in repo.unfiltered().dirstate.parents():
513 519 msg = _("working directory has unknown parent '%s'!")
514 520 raise error.Abort(msg % short(changeid))
515 521 try:
516 522 if len(changeid) == 20 and nonascii(changeid):
517 523 changeid = hex(changeid)
518 524 except TypeError:
519 525 pass
520 526 except (error.FilteredIndexError, error.FilteredLookupError,
521 527 error.FilteredRepoLookupError):
522 528 raise _filterederror(repo, changeid)
523 529 except IndexError:
524 530 pass
525 531 raise error.RepoLookupError(
526 532 _("unknown revision '%s'") % changeid)
527 533
528 534 def __hash__(self):
529 535 try:
530 536 return hash(self._rev)
531 537 except AttributeError:
532 538 return id(self)
533 539
534 540 def __nonzero__(self):
535 541 return self._rev != nullrev
536 542
537 543 __bool__ = __nonzero__
538 544
539 545 @propertycache
540 546 def _changeset(self):
541 547 return self._repo.changelog.changelogrevision(self.rev())
542 548
543 549 @propertycache
544 550 def _manifest(self):
545 551 return self._manifestctx.read()
546 552
547 553 @property
548 554 def _manifestctx(self):
549 555 return self._repo.manifestlog[self._changeset.manifest]
550 556
551 557 @propertycache
552 558 def _manifestdelta(self):
553 559 return self._manifestctx.readdelta()
554 560
555 561 @propertycache
556 562 def _parents(self):
557 563 repo = self._repo
558 564 p1, p2 = repo.changelog.parentrevs(self._rev)
559 565 if p2 == nullrev:
560 566 return [changectx(repo, p1)]
561 567 return [changectx(repo, p1), changectx(repo, p2)]
562 568
563 569 def changeset(self):
564 570 c = self._changeset
565 571 return (
566 572 c.manifest,
567 573 c.user,
568 574 c.date,
569 575 c.files,
570 576 c.description,
571 577 c.extra,
572 578 )
573 579 def manifestnode(self):
574 580 return self._changeset.manifest
575 581
576 582 def user(self):
577 583 return self._changeset.user
578 584 def date(self):
579 585 return self._changeset.date
580 586 def files(self):
581 587 return self._changeset.files
582 588 def description(self):
583 589 return self._changeset.description
584 590 def branch(self):
585 591 return encoding.tolocal(self._changeset.extra.get("branch"))
586 592 def closesbranch(self):
587 593 return 'close' in self._changeset.extra
588 594 def extra(self):
589 595 return self._changeset.extra
590 596 def tags(self):
591 597 return self._repo.nodetags(self._node)
592 598 def bookmarks(self):
593 599 return self._repo.nodebookmarks(self._node)
594 600 def phase(self):
595 601 return self._repo._phasecache.phase(self._repo, self._rev)
596 602 def hidden(self):
597 603 return self._rev in repoview.filterrevs(self._repo, 'visible')
598 604
599 605 def children(self):
600 606 """return contexts for each child changeset"""
601 607 c = self._repo.changelog.children(self._node)
602 608 return [changectx(self._repo, x) for x in c]
603 609
604 610 def ancestors(self):
605 611 for a in self._repo.changelog.ancestors([self._rev]):
606 612 yield changectx(self._repo, a)
607 613
608 614 def descendants(self):
609 615 for d in self._repo.changelog.descendants([self._rev]):
610 616 yield changectx(self._repo, d)
611 617
612 618 def filectx(self, path, fileid=None, filelog=None):
613 619 """get a file context from this changeset"""
614 620 if fileid is None:
615 621 fileid = self.filenode(path)
616 622 return filectx(self._repo, path, fileid=fileid,
617 623 changectx=self, filelog=filelog)
618 624
619 625 def ancestor(self, c2, warn=False):
620 626 """return the "best" ancestor context of self and c2
621 627
622 628 If there are multiple candidates, it will show a message and check
623 629 merge.preferancestor configuration before falling back to the
624 630 revlog ancestor."""
625 631 # deal with workingctxs
626 632 n2 = c2._node
627 633 if n2 is None:
628 634 n2 = c2._parents[0]._node
629 635 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
630 636 if not cahs:
631 637 anc = nullid
632 638 elif len(cahs) == 1:
633 639 anc = cahs[0]
634 640 else:
635 641 # experimental config: merge.preferancestor
636 642 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
637 643 try:
638 644 ctx = changectx(self._repo, r)
639 645 except error.RepoLookupError:
640 646 continue
641 647 anc = ctx.node()
642 648 if anc in cahs:
643 649 break
644 650 else:
645 651 anc = self._repo.changelog.ancestor(self._node, n2)
646 652 if warn:
647 653 self._repo.ui.status(
648 654 (_("note: using %s as ancestor of %s and %s\n") %
649 655 (short(anc), short(self._node), short(n2))) +
650 656 ''.join(_(" alternatively, use --config "
651 657 "merge.preferancestor=%s\n") %
652 658 short(n) for n in sorted(cahs) if n != anc))
653 659 return changectx(self._repo, anc)
654 660
655 661 def descendant(self, other):
656 662 """True if other is descendant of this changeset"""
657 663 return self._repo.changelog.descendant(self._rev, other._rev)
658 664
659 665 def walk(self, match):
660 666 '''Generates matching file names.'''
661 667
662 668 # Wrap match.bad method to have message with nodeid
663 669 def bad(fn, msg):
664 670 # The manifest doesn't know about subrepos, so don't complain about
665 671 # paths into valid subrepos.
666 672 if any(fn == s or fn.startswith(s + '/')
667 673 for s in self.substate):
668 674 return
669 675 match.bad(fn, _('no such file in rev %s') % self)
670 676
671 677 m = matchmod.badmatch(match, bad)
672 678 return self._manifest.walk(m)
673 679
674 680 def matches(self, match):
675 681 return self.walk(match)
676 682
677 683 class basefilectx(object):
678 684 """A filecontext object represents the common logic for its children:
679 685 filectx: read-only access to a filerevision that is already present
680 686 in the repo,
681 687 workingfilectx: a filecontext that represents files from the working
682 688 directory,
683 689 memfilectx: a filecontext that represents files in-memory,
684 690 overlayfilectx: duplicate another filecontext with some fields overridden.
685 691 """
686 692 @propertycache
687 693 def _filelog(self):
688 694 return self._repo.file(self._path)
689 695
690 696 @propertycache
691 697 def _changeid(self):
692 698 if r'_changeid' in self.__dict__:
693 699 return self._changeid
694 700 elif r'_changectx' in self.__dict__:
695 701 return self._changectx.rev()
696 702 elif r'_descendantrev' in self.__dict__:
697 703 # this file context was created from a revision with a known
698 704 # descendant, we can (lazily) correct for linkrev aliases
699 705 return self._adjustlinkrev(self._descendantrev)
700 706 else:
701 707 return self._filelog.linkrev(self._filerev)
702 708
703 709 @propertycache
704 710 def _filenode(self):
705 711 if r'_fileid' in self.__dict__:
706 712 return self._filelog.lookup(self._fileid)
707 713 else:
708 714 return self._changectx.filenode(self._path)
709 715
710 716 @propertycache
711 717 def _filerev(self):
712 718 return self._filelog.rev(self._filenode)
713 719
714 720 @propertycache
715 721 def _repopath(self):
716 722 return self._path
717 723
718 724 def __nonzero__(self):
719 725 try:
720 726 self._filenode
721 727 return True
722 728 except error.LookupError:
723 729 # file is missing
724 730 return False
725 731
726 732 __bool__ = __nonzero__
727 733
728 734 def __bytes__(self):
729 735 try:
730 736 return "%s@%s" % (self.path(), self._changectx)
731 737 except error.LookupError:
732 738 return "%s@???" % self.path()
733 739
734 740 __str__ = encoding.strmethod(__bytes__)
735 741
736 742 def __repr__(self):
737 743 return "<%s %s>" % (type(self).__name__, str(self))
738 744
739 745 def __hash__(self):
740 746 try:
741 747 return hash((self._path, self._filenode))
742 748 except AttributeError:
743 749 return id(self)
744 750
745 751 def __eq__(self, other):
746 752 try:
747 753 return (type(self) == type(other) and self._path == other._path
748 754 and self._filenode == other._filenode)
749 755 except AttributeError:
750 756 return False
751 757
752 758 def __ne__(self, other):
753 759 return not (self == other)
754 760
755 761 def filerev(self):
756 762 return self._filerev
757 763 def filenode(self):
758 764 return self._filenode
759 765 @propertycache
760 766 def _flags(self):
761 767 return self._changectx.flags(self._path)
762 768 def flags(self):
763 769 return self._flags
764 770 def filelog(self):
765 771 return self._filelog
766 772 def rev(self):
767 773 return self._changeid
768 774 def linkrev(self):
769 775 return self._filelog.linkrev(self._filerev)
770 776 def node(self):
771 777 return self._changectx.node()
772 778 def hex(self):
773 779 return self._changectx.hex()
774 780 def user(self):
775 781 return self._changectx.user()
776 782 def date(self):
777 783 return self._changectx.date()
778 784 def files(self):
779 785 return self._changectx.files()
780 786 def description(self):
781 787 return self._changectx.description()
782 788 def branch(self):
783 789 return self._changectx.branch()
784 790 def extra(self):
785 791 return self._changectx.extra()
786 792 def phase(self):
787 793 return self._changectx.phase()
788 794 def phasestr(self):
789 795 return self._changectx.phasestr()
790 796 def manifest(self):
791 797 return self._changectx.manifest()
792 798 def changectx(self):
793 799 return self._changectx
794 800 def renamed(self):
795 801 return self._copied
796 802 def repo(self):
797 803 return self._repo
798 804 def size(self):
799 805 return len(self.data())
800 806
801 807 def path(self):
802 808 return self._path
803 809
804 810 def isbinary(self):
805 811 try:
806 812 return util.binary(self.data())
807 813 except IOError:
808 814 return False
809 815 def isexec(self):
810 816 return 'x' in self.flags()
811 817 def islink(self):
812 818 return 'l' in self.flags()
813 819
814 820 def isabsent(self):
815 821 """whether this filectx represents a file not in self._changectx
816 822
817 823 This is mainly for merge code to detect change/delete conflicts. This is
818 824 expected to be True for all subclasses of basectx."""
819 825 return False
820 826
821 827 _customcmp = False
822 828 def cmp(self, fctx):
823 829 """compare with other file context
824 830
825 831 returns True if different than fctx.
826 832 """
827 833 if fctx._customcmp:
828 834 return fctx.cmp(self)
829 835
830 836 if (fctx._filenode is None
831 837 and (self._repo._encodefilterpats
832 838 # if file data starts with '\1\n', empty metadata block is
833 839 # prepended, which adds 4 bytes to filelog.size().
834 840 or self.size() - 4 == fctx.size())
835 841 or self.size() == fctx.size()):
836 842 return self._filelog.cmp(self._filenode, fctx.data())
837 843
838 844 return True
839 845
840 846 def _adjustlinkrev(self, srcrev, inclusive=False):
841 847 """return the first ancestor of <srcrev> introducing <fnode>
842 848
843 849 If the linkrev of the file revision does not point to an ancestor of
844 850 srcrev, we'll walk down the ancestors until we find one introducing
845 851 this file revision.
846 852
847 853 :srcrev: the changeset revision we search ancestors from
848 854 :inclusive: if true, the src revision will also be checked
849 855 """
850 856 repo = self._repo
851 857 cl = repo.unfiltered().changelog
852 858 mfl = repo.manifestlog
853 859 # fetch the linkrev
854 860 lkr = self.linkrev()
855 861 # hack to reuse ancestor computation when searching for renames
856 862 memberanc = getattr(self, '_ancestrycontext', None)
857 863 iteranc = None
858 864 if srcrev is None:
859 865 # wctx case, used by workingfilectx during mergecopy
860 866 revs = [p.rev() for p in self._repo[None].parents()]
861 867 inclusive = True # we skipped the real (revless) source
862 868 else:
863 869 revs = [srcrev]
864 870 if memberanc is None:
865 871 memberanc = iteranc = cl.ancestors(revs, lkr,
866 872 inclusive=inclusive)
867 873 # check if this linkrev is an ancestor of srcrev
868 874 if lkr not in memberanc:
869 875 if iteranc is None:
870 876 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
871 877 fnode = self._filenode
872 878 path = self._path
873 879 for a in iteranc:
874 880 ac = cl.read(a) # get changeset data (we avoid object creation)
875 881 if path in ac[3]: # checking the 'files' field.
876 882 # The file has been touched, check if the content is
877 883 # similar to the one we search for.
878 884 if fnode == mfl[ac[0]].readfast().get(path):
879 885 return a
880 886 # In theory, we should never get out of that loop without a result.
881 887 # But if the manifest uses a buggy file revision (not a child of the
882 888 # one it replaces) we could. Such a buggy situation will likely
883 889 # result in a crash somewhere else at some point.
884 890 return lkr
885 891
886 892 def introrev(self):
887 893 """return the rev of the changeset which introduced this file revision
888 894
889 895 This method is different from linkrev because it takes into account the
890 896 changeset the filectx was created from. It ensures the returned
891 897 revision is one of its ancestors. This prevents bugs from
892 898 'linkrev-shadowing' when a file revision is used by multiple
893 899 changesets.
894 900 """
895 901 lkr = self.linkrev()
896 902 attrs = vars(self)
897 903 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
898 904 if noctx or self.rev() == lkr:
899 905 return self.linkrev()
900 906 return self._adjustlinkrev(self.rev(), inclusive=True)
901 907
902 908 def _parentfilectx(self, path, fileid, filelog):
903 909 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
904 910 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
905 911 if '_changeid' in vars(self) or '_changectx' in vars(self):
906 912 # If self is associated with a changeset (probably explicitly
907 913 # fed), ensure the created filectx is associated with a
908 914 # changeset that is an ancestor of self.changectx.
909 915 # This lets us later use _adjustlinkrev to get a correct link.
910 916 fctx._descendantrev = self.rev()
911 917 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
912 918 elif '_descendantrev' in vars(self):
913 919 # Otherwise propagate _descendantrev if we have one associated.
914 920 fctx._descendantrev = self._descendantrev
915 921 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
916 922 return fctx
917 923
918 924 def parents(self):
919 925 _path = self._path
920 926 fl = self._filelog
921 927 parents = self._filelog.parents(self._filenode)
922 928 pl = [(_path, node, fl) for node in parents if node != nullid]
923 929
924 930 r = fl.renamed(self._filenode)
925 931 if r:
926 932 # - In the simple rename case, both parents are nullid, pl is empty.
927 933 # - In case of merge, only one of the parents is nullid and should
928 934 # be replaced with the rename information. This parent is -always-
929 935 # the first one.
930 936 #
931 937 # As nullid has always been filtered out in the previous list
932 938 # comprehension, inserting at 0 will always result in replacing the
933 939 # first nullid parent with the rename information.
934 940 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
935 941
936 942 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
937 943
938 944 def p1(self):
939 945 return self.parents()[0]
940 946
941 947 def p2(self):
942 948 p = self.parents()
943 949 if len(p) == 2:
944 950 return p[1]
945 951 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
946 952
947 953 def annotate(self, follow=False, linenumber=False, skiprevs=None,
948 954 diffopts=None):
949 955 '''returns a list of tuples of ((ctx, number), line) for each line
950 956 in the file, where ctx is the filectx of the node where
951 957 that line was last changed; if linenumber parameter is true, number is
952 958 the line number at the first appearance in the managed file, otherwise,
953 959 number has a fixed value of False.
954 960 '''
955 961
956 962 def lines(text):
957 963 if text.endswith("\n"):
958 964 return text.count("\n")
959 965 return text.count("\n") + int(bool(text))
960 966
961 967 if linenumber:
962 968 def decorate(text, rev):
963 969 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
964 970 else:
965 971 def decorate(text, rev):
966 972 return ([(rev, False)] * lines(text), text)
967 973
968 974 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
969 975
970 976 def parents(f):
971 977 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
972 978 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
973 979 # from the topmost introrev (= srcrev) down to p.linkrev() if it
974 980 # isn't an ancestor of the srcrev.
975 981 f._changeid
976 982 pl = f.parents()
977 983
978 984 # Don't return renamed parents if we aren't following.
979 985 if not follow:
980 986 pl = [p for p in pl if p.path() == f.path()]
981 987
982 988 # renamed filectx won't have a filelog yet, so set it
983 989 # from the cache to save time
984 990 for p in pl:
985 991 if not '_filelog' in p.__dict__:
986 992 p._filelog = getlog(p.path())
987 993
988 994 return pl
989 995
990 996 # use linkrev to find the first changeset where self appeared
991 997 base = self
992 998 introrev = self.introrev()
993 999 if self.rev() != introrev:
994 1000 base = self.filectx(self.filenode(), changeid=introrev)
995 1001 if getattr(base, '_ancestrycontext', None) is None:
996 1002 cl = self._repo.changelog
997 1003 if introrev is None:
998 1004 # wctx is not inclusive, but works because _ancestrycontext
999 1005 # is used to test filelog revisions
1000 1006 ac = cl.ancestors([p.rev() for p in base.parents()],
1001 1007 inclusive=True)
1002 1008 else:
1003 1009 ac = cl.ancestors([introrev], inclusive=True)
1004 1010 base._ancestrycontext = ac
1005 1011
1006 1012 # This algorithm would prefer to be recursive, but Python is a
1007 1013 # bit recursion-hostile. Instead we do an iterative
1008 1014 # depth-first search.
1009 1015
1010 1016 # 1st DFS pre-calculates pcache and needed
1011 1017 visit = [base]
1012 1018 pcache = {}
1013 1019 needed = {base: 1}
1014 1020 while visit:
1015 1021 f = visit.pop()
1016 1022 if f in pcache:
1017 1023 continue
1018 1024 pl = parents(f)
1019 1025 pcache[f] = pl
1020 1026 for p in pl:
1021 1027 needed[p] = needed.get(p, 0) + 1
1022 1028 if p not in pcache:
1023 1029 visit.append(p)
1024 1030
1025 1031 # 2nd DFS does the actual annotate
1026 1032 visit[:] = [base]
1027 1033 hist = {}
1028 1034 while visit:
1029 1035 f = visit[-1]
1030 1036 if f in hist:
1031 1037 visit.pop()
1032 1038 continue
1033 1039
1034 1040 ready = True
1035 1041 pl = pcache[f]
1036 1042 for p in pl:
1037 1043 if p not in hist:
1038 1044 ready = False
1039 1045 visit.append(p)
1040 1046 if ready:
1041 1047 visit.pop()
1042 1048 curr = decorate(f.data(), f)
1043 1049 skipchild = False
1044 1050 if skiprevs is not None:
1045 1051 skipchild = f._changeid in skiprevs
1046 1052 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1047 1053 diffopts)
1048 1054 for p in pl:
1049 1055 if needed[p] == 1:
1050 1056 del hist[p]
1051 1057 del needed[p]
1052 1058 else:
1053 1059 needed[p] -= 1
1054 1060
1055 1061 hist[f] = curr
1056 1062 del pcache[f]
1057 1063
1058 1064 return zip(hist[base][0], hist[base][1].splitlines(True))
1059 1065
1060 1066 def ancestors(self, followfirst=False):
1061 1067 visit = {}
1062 1068 c = self
1063 1069 if followfirst:
1064 1070 cut = 1
1065 1071 else:
1066 1072 cut = None
1067 1073
1068 1074 while True:
1069 1075 for parent in c.parents()[:cut]:
1070 1076 visit[(parent.linkrev(), parent.filenode())] = parent
1071 1077 if not visit:
1072 1078 break
1073 1079 c = visit.pop(max(visit))
1074 1080 yield c
1075 1081
1076 1082 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1077 1083 r'''
1078 1084 Given parent and child fctxes and annotate data for parents, for all lines
1079 1085 in either parent that match the child, annotate the child with the parent's
1080 1086 data.
1081 1087
1082 1088 Additionally, if `skipchild` is True, replace all other lines with parent
1083 1089 annotate data as well such that child is never blamed for any lines.
1084 1090
1085 1091 >>> oldfctx = 'old'
1086 1092 >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
1087 1093 >>> olddata = 'a\nb\n'
1088 1094 >>> p1data = 'a\nb\nc\n'
1089 1095 >>> p2data = 'a\nc\nd\n'
1090 1096 >>> childdata = 'a\nb2\nc\nc2\nd\n'
1091 1097 >>> diffopts = mdiff.diffopts()
1092 1098
1093 1099 >>> def decorate(text, rev):
1094 1100 ... return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)
1095 1101
1096 1102 Basic usage:
1097 1103
1098 1104 >>> oldann = decorate(olddata, oldfctx)
1099 1105 >>> p1ann = decorate(p1data, p1fctx)
1100 1106 >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
1101 1107 >>> p1ann[0]
1102 1108 [('old', 1), ('old', 2), ('p1', 3)]
1103 1109 >>> p2ann = decorate(p2data, p2fctx)
1104 1110 >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
1105 1111 >>> p2ann[0]
1106 1112 [('old', 1), ('p2', 2), ('p2', 3)]
1107 1113
1108 1114 Test with multiple parents (note the difference caused by ordering):
1109 1115
1110 1116 >>> childann = decorate(childdata, childfctx)
1111 1117 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
1112 1118 ... diffopts)
1113 1119 >>> childann[0]
1114 1120 [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]
1115 1121
1116 1122 >>> childann = decorate(childdata, childfctx)
1117 1123 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
1118 1124 ... diffopts)
1119 1125 >>> childann[0]
1120 1126 [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]
1121 1127
1122 1128 Test with skipchild (note the difference caused by ordering):
1123 1129
1124 1130 >>> childann = decorate(childdata, childfctx)
1125 1131 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
1126 1132 ... diffopts)
1127 1133 >>> childann[0]
1128 1134 [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]
1129 1135
1130 1136 >>> childann = decorate(childdata, childfctx)
1131 1137 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
1132 1138 ... diffopts)
1133 1139 >>> childann[0]
1134 1140 [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
1135 1141 '''
1136 1142 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1137 1143 for parent in parents]
1138 1144
1139 1145 if skipchild:
1140 1146 # Need to iterate over the blocks twice -- make it a list
1141 1147 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1142 1148 # Mercurial currently prefers p2 over p1 for annotate.
1143 1149 # TODO: change this?
1144 1150 for parent, blocks in pblocks:
1145 1151 for (a1, a2, b1, b2), t in blocks:
1146 1152 # Changed blocks ('!') or blocks made only of blank lines ('~')
1147 1153 # belong to the child.
1148 1154 if t == '=':
1149 1155 child[0][b1:b2] = parent[0][a1:a2]
1150 1156
1151 1157 if skipchild:
1152 1158 # Now try and match up anything that couldn't be matched,
1153 1159 # Reversing pblocks maintains bias towards p2, matching above
1154 1160 # behavior.
1155 1161 pblocks.reverse()
1156 1162
1157 1163 # The heuristics are:
1158 1164 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1159 1165 # This could potentially be smarter but works well enough.
1160 1166 # * For a non-matching section, do a best-effort fit. Match lines in
1161 1167 # diff hunks 1:1, dropping lines as necessary.
1162 1168 # * Repeat the last line as a last resort.
1163 1169
1164 1170 # First, replace as much as possible without repeating the last line.
1165 1171 remaining = [(parent, []) for parent, _blocks in pblocks]
1166 1172 for idx, (parent, blocks) in enumerate(pblocks):
1167 1173 for (a1, a2, b1, b2), _t in blocks:
1168 1174 if a2 - a1 >= b2 - b1:
1169 1175 for bk in xrange(b1, b2):
1170 1176 if child[0][bk][0] == childfctx:
1171 1177 ak = min(a1 + (bk - b1), a2 - 1)
1172 1178 child[0][bk] = parent[0][ak]
1173 1179 else:
1174 1180 remaining[idx][1].append((a1, a2, b1, b2))
1175 1181
1176 1182 # Then, look at anything left, which might involve repeating the last
1177 1183 # line.
1178 1184 for parent, blocks in remaining:
1179 1185 for a1, a2, b1, b2 in blocks:
1180 1186 for bk in xrange(b1, b2):
1181 1187 if child[0][bk][0] == childfctx:
1182 1188 ak = min(a1 + (bk - b1), a2 - 1)
1183 1189 child[0][bk] = parent[0][ak]
1184 1190 return child
1185 1191
1186 1192 class filectx(basefilectx):
1187 1193 """A filecontext object makes access to data related to a particular
1188 1194 filerevision convenient."""
1189 1195 def __init__(self, repo, path, changeid=None, fileid=None,
1190 1196 filelog=None, changectx=None):
1191 1197 """changeid can be a changeset revision, node, or tag.
1192 1198 fileid can be a file revision or node."""
1193 1199 self._repo = repo
1194 1200 self._path = path
1195 1201
1196 1202 assert (changeid is not None
1197 1203 or fileid is not None
1198 1204 or changectx is not None), \
1199 1205 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1200 1206 % (changeid, fileid, changectx))
1201 1207
1202 1208 if filelog is not None:
1203 1209 self._filelog = filelog
1204 1210
1205 1211 if changeid is not None:
1206 1212 self._changeid = changeid
1207 1213 if changectx is not None:
1208 1214 self._changectx = changectx
1209 1215 if fileid is not None:
1210 1216 self._fileid = fileid
1211 1217
1212 1218 @propertycache
1213 1219 def _changectx(self):
1214 1220 try:
1215 1221 return changectx(self._repo, self._changeid)
1216 1222 except error.FilteredRepoLookupError:
1217 1223 # Linkrev may point to any revision in the repository. When the
1218 1224 # repository is filtered this may lead to `filectx` trying to build
1219 1225 # `changectx` for a filtered revision. In such a case we fall back to
1220 1226 # creating `changectx` on the unfiltered version of the repository.
1221 1227 # This fallback should not be an issue because `changectx` from
1222 1228 # `filectx` are not used in complex operations that care about
1223 1229 # filtering.
1224 1230 #
1225 1231 # This fallback is a cheap and dirty fix that prevents several
1226 1232 # crashes. It does not ensure the behavior is correct. However the
1227 1233 # behavior was not correct before filtering either and "incorrect
1228 1234 # behavior" is seen as better than "crash".
1229 1235 #
1230 1236 # Linkrevs have several serious issues with filtering that are
1231 1237 # complicated to solve. Proper handling of the issue here should be
1232 1238 # considered when solving the linkrev issues is on the table.
1233 1239 return changectx(self._repo.unfiltered(), self._changeid)
1234 1240
1235 1241 def filectx(self, fileid, changeid=None):
1236 1242 '''opens an arbitrary revision of the file without
1237 1243 opening a new filelog'''
1238 1244 return filectx(self._repo, self._path, fileid=fileid,
1239 1245 filelog=self._filelog, changeid=changeid)
1240 1246
1241 1247 def rawdata(self):
1242 1248 return self._filelog.revision(self._filenode, raw=True)
1243 1249
1244 1250 def rawflags(self):
1245 1251 """low-level revlog flags"""
1246 1252 return self._filelog.flags(self._filerev)
1247 1253
1248 1254 def data(self):
1249 1255 try:
1250 1256 return self._filelog.read(self._filenode)
1251 1257 except error.CensoredNodeError:
1252 1258 if self._repo.ui.config("censor", "policy") == "ignore":
1253 1259 return ""
1254 1260 raise error.Abort(_("censored node: %s") % short(self._filenode),
1255 1261 hint=_("set censor.policy to ignore errors"))
1256 1262
1257 1263 def size(self):
1258 1264 return self._filelog.size(self._filerev)
1259 1265
1260 1266 @propertycache
1261 1267 def _copied(self):
1262 1268 """check if file was actually renamed in this changeset revision
1263 1269
1264 1270 If a rename is logged in the file revision, we report the copy for the
1265 1271 changeset only if the file revision's linkrev points back to the changeset
1266 1272 in question or both changeset parents contain different file revisions.
1267 1273 """
1268 1274
1269 1275 renamed = self._filelog.renamed(self._filenode)
1270 1276 if not renamed:
1271 1277 return renamed
1272 1278
1273 1279 if self.rev() == self.linkrev():
1274 1280 return renamed
1275 1281
1276 1282 name = self.path()
1277 1283 fnode = self._filenode
1278 1284 for p in self._changectx.parents():
1279 1285 try:
1280 1286 if fnode == p.filenode(name):
1281 1287 return None
1282 1288 except error.LookupError:
1283 1289 pass
1284 1290 return renamed
1285 1291
1286 1292 def children(self):
1287 1293 # hard for renames
1288 1294 c = self._filelog.children(self._filenode)
1289 1295 return [filectx(self._repo, self._path, fileid=x,
1290 1296 filelog=self._filelog) for x in c]
1291 1297
1292 1298 class committablectx(basectx):
1293 1299 """A committablectx object provides common functionality for a context that
1294 1300 wants the ability to commit, e.g. workingctx or memctx."""
1295 1301 def __init__(self, repo, text="", user=None, date=None, extra=None,
1296 1302 changes=None):
1297 1303 self._repo = repo
1298 1304 self._rev = None
1299 1305 self._node = None
1300 1306 self._text = text
1301 1307 if date:
1302 1308 self._date = util.parsedate(date)
1303 1309 if user:
1304 1310 self._user = user
1305 1311 if changes:
1306 1312 self._status = changes
1307 1313
1308 1314 self._extra = {}
1309 1315 if extra:
1310 1316 self._extra = extra.copy()
1311 1317 if 'branch' not in self._extra:
1312 1318 try:
1313 1319 branch = encoding.fromlocal(self._repo.dirstate.branch())
1314 1320 except UnicodeDecodeError:
1315 1321 raise error.Abort(_('branch name not in UTF-8!'))
1316 1322 self._extra['branch'] = branch
1317 1323 if self._extra['branch'] == '':
1318 1324 self._extra['branch'] = 'default'
1319 1325
1320 1326 def __bytes__(self):
1321 1327 return bytes(self._parents[0]) + "+"
1322 1328
1323 1329 __str__ = encoding.strmethod(__bytes__)
1324 1330
1325 1331 def __nonzero__(self):
1326 1332 return True
1327 1333
1328 1334 __bool__ = __nonzero__
1329 1335
1330 1336 def _buildflagfunc(self):
1331 1337 # Create a fallback function for getting file flags when the
1332 1338 # filesystem doesn't support them
1333 1339
1334 1340 copiesget = self._repo.dirstate.copies().get
1335 1341 parents = self.parents()
1336 1342 if len(parents) < 2:
1337 1343 # when we have one parent, it's easy: copy from parent
1338 1344 man = parents[0].manifest()
1339 1345 def func(f):
1340 1346 f = copiesget(f, f)
1341 1347 return man.flags(f)
1342 1348 else:
1343 1349 # merges are tricky: we try to reconstruct the unstored
1344 1350 # result from the merge (issue1802)
1345 1351 p1, p2 = parents
1346 1352 pa = p1.ancestor(p2)
1347 1353 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1348 1354
1349 1355 def func(f):
1350 1356 f = copiesget(f, f) # may be wrong for merges with copies
1351 1357 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1352 1358 if fl1 == fl2:
1353 1359 return fl1
1354 1360 if fl1 == fla:
1355 1361 return fl2
1356 1362 if fl2 == fla:
1357 1363 return fl1
1358 1364 return '' # punt for conflicts
1359 1365
1360 1366 return func
1361 1367
1362 1368 @propertycache
1363 1369 def _flagfunc(self):
1364 1370 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1365 1371
1366 1372 @propertycache
1367 1373 def _status(self):
1368 1374 return self._repo.status()
1369 1375
1370 1376 @propertycache
1371 1377 def _user(self):
1372 1378 return self._repo.ui.username()
1373 1379
1374 1380 @propertycache
1375 1381 def _date(self):
1376 1382 ui = self._repo.ui
1377 1383 date = ui.configdate('devel', 'default-date')
1378 1384 if date is None:
1379 1385 date = util.makedate()
1380 1386 return date
1381 1387
1382 1388 def subrev(self, subpath):
1383 1389 return None
1384 1390
1385 1391 def manifestnode(self):
1386 1392 return None
1387 1393 def user(self):
1388 1394 return self._user or self._repo.ui.username()
1389 1395 def date(self):
1390 1396 return self._date
1391 1397 def description(self):
1392 1398 return self._text
1393 1399 def files(self):
1394 1400 return sorted(self._status.modified + self._status.added +
1395 1401 self._status.removed)
1396 1402
1397 1403 def modified(self):
1398 1404 return self._status.modified
1399 1405 def added(self):
1400 1406 return self._status.added
1401 1407 def removed(self):
1402 1408 return self._status.removed
1403 1409 def deleted(self):
1404 1410 return self._status.deleted
1405 1411 def branch(self):
1406 1412 return encoding.tolocal(self._extra['branch'])
1407 1413 def closesbranch(self):
1408 1414 return 'close' in self._extra
1409 1415 def extra(self):
1410 1416 return self._extra
1411 1417
1412 1418 def tags(self):
1413 1419 return []
1414 1420
1415 1421 def bookmarks(self):
1416 1422 b = []
1417 1423 for p in self.parents():
1418 1424 b.extend(p.bookmarks())
1419 1425 return b
1420 1426
1421 1427 def phase(self):
1422 1428 phase = phases.draft # default phase to draft
1423 1429 for p in self.parents():
1424 1430 phase = max(phase, p.phase())
1425 1431 return phase
1426 1432
1427 1433 def hidden(self):
1428 1434 return False
1429 1435
1430 1436 def children(self):
1431 1437 return []
1432 1438
1433 1439 def flags(self, path):
1434 1440 if r'_manifest' in self.__dict__:
1435 1441 try:
1436 1442 return self._manifest.flags(path)
1437 1443 except KeyError:
1438 1444 return ''
1439 1445
1440 1446 try:
1441 1447 return self._flagfunc(path)
1442 1448 except OSError:
1443 1449 return ''
1444 1450
1445 1451 def ancestor(self, c2):
1446 1452 """return the "best" ancestor context of self and c2"""
1447 1453 return self._parents[0].ancestor(c2) # punt on two parents for now
1448 1454
1449 1455 def walk(self, match):
1450 1456 '''Generates matching file names.'''
1451 1457 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1452 1458 True, False))
1453 1459
1454 1460 def matches(self, match):
1455 1461 return sorted(self._repo.dirstate.matches(match))
1456 1462
1457 1463 def ancestors(self):
1458 1464 for p in self._parents:
1459 1465 yield p
1460 1466 for a in self._repo.changelog.ancestors(
1461 1467 [p.rev() for p in self._parents]):
1462 1468 yield changectx(self._repo, a)
1463 1469
1464 1470 def markcommitted(self, node):
1465 1471 """Perform post-commit cleanup necessary after committing this ctx
1466 1472
1467 1473 Specifically, this updates backing stores this working context
1468 1474 wraps to reflect the fact that the changes reflected by this
1469 1475 workingctx have been committed. For example, it marks
1470 1476 modified and added files as normal in the dirstate.
1471 1477
1472 1478 """
1473 1479
1474 1480 with self._repo.dirstate.parentchange():
1475 1481 for f in self.modified() + self.added():
1476 1482 self._repo.dirstate.normal(f)
1477 1483 for f in self.removed():
1478 1484 self._repo.dirstate.drop(f)
1479 1485 self._repo.dirstate.setparents(node)
1480 1486
1481 1487 # write changes out explicitly, because nesting wlock at
1482 1488 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1483 1489 # from immediately doing so for subsequent changing files
1484 1490 self._repo.dirstate.write(self._repo.currenttransaction())
1485 1491
1486 1492 def dirty(self, missing=False, merge=True, branch=True):
1487 1493 return False
1488 1494
1489 1495 class workingctx(committablectx):
1490 1496 """A workingctx object makes access to data related to
1491 1497 the current working directory convenient.
1492 1498 date - any valid date string or (unixtime, offset), or None.
1493 1499 user - username string, or None.
1494 1500 extra - a dictionary of extra values, or None.
1495 1501 changes - a list of file lists as returned by localrepo.status()
1496 1502 or None to use the repository status.
1497 1503 """
1498 1504 def __init__(self, repo, text="", user=None, date=None, extra=None,
1499 1505 changes=None):
1500 1506 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1501 1507
1502 1508 def __iter__(self):
1503 1509 d = self._repo.dirstate
1504 1510 for f in d:
1505 1511 if d[f] != 'r':
1506 1512 yield f
1507 1513
1508 1514 def __contains__(self, key):
1509 1515 return self._repo.dirstate[key] not in "?r"
1510 1516
1511 1517 def hex(self):
1512 1518 return hex(wdirid)
1513 1519
1514 1520 @propertycache
1515 1521 def _parents(self):
1516 1522 p = self._repo.dirstate.parents()
1517 1523 if p[1] == nullid:
1518 1524 p = p[:-1]
1519 1525 return [changectx(self._repo, x) for x in p]
1520 1526
1521 1527 def filectx(self, path, filelog=None):
1522 1528 """get a file context from the working directory"""
1523 1529 return workingfilectx(self._repo, path, workingctx=self,
1524 1530 filelog=filelog)
1525 1531
1526 1532 def dirty(self, missing=False, merge=True, branch=True):
1527 1533 "check whether a working directory is modified"
1528 1534 # check subrepos first
1529 1535 for s in sorted(self.substate):
1530 1536 if self.sub(s).dirty(missing=missing):
1531 1537 return True
1532 1538 # check current working dir
1533 1539 return ((merge and self.p2()) or
1534 1540 (branch and self.branch() != self.p1().branch()) or
1535 1541 self.modified() or self.added() or self.removed() or
1536 1542 (missing and self.deleted()))
1537 1543
1538 1544 def add(self, list, prefix=""):
1539 1545 with self._repo.wlock():
1540 1546 ui, ds = self._repo.ui, self._repo.dirstate
1541 1547 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1542 1548 rejected = []
1543 1549 lstat = self._repo.wvfs.lstat
1544 1550 for f in list:
1545 1551 # ds.pathto() returns an absolute file when this is invoked from
1546 1552 # the keyword extension. That gets flagged as non-portable on
1547 1553 # Windows, since it contains the drive letter and colon.
1548 1554 scmutil.checkportable(ui, os.path.join(prefix, f))
1549 1555 try:
1550 1556 st = lstat(f)
1551 1557 except OSError:
1552 1558 ui.warn(_("%s does not exist!\n") % uipath(f))
1553 1559 rejected.append(f)
1554 1560 continue
1555 1561 if st.st_size > 10000000:
1556 1562 ui.warn(_("%s: up to %d MB of RAM may be required "
1557 1563 "to manage this file\n"
1558 1564 "(use 'hg revert %s' to cancel the "
1559 1565 "pending addition)\n")
1560 1566 % (f, 3 * st.st_size // 1000000, uipath(f)))
1561 1567 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1562 1568 ui.warn(_("%s not added: only files and symlinks "
1563 1569 "supported currently\n") % uipath(f))
1564 1570 rejected.append(f)
1565 1571 elif ds[f] in 'amn':
1566 1572 ui.warn(_("%s already tracked!\n") % uipath(f))
1567 1573 elif ds[f] == 'r':
1568 1574 ds.normallookup(f)
1569 1575 else:
1570 1576 ds.add(f)
1571 1577 return rejected
1572 1578
1573 1579 def forget(self, files, prefix=""):
1574 1580 with self._repo.wlock():
1575 1581 ds = self._repo.dirstate
1576 1582 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1577 1583 rejected = []
1578 1584 for f in files:
1579 1585 if f not in self._repo.dirstate:
1580 1586 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1581 1587 rejected.append(f)
1582 1588 elif self._repo.dirstate[f] != 'a':
1583 1589 self._repo.dirstate.remove(f)
1584 1590 else:
1585 1591 self._repo.dirstate.drop(f)
1586 1592 return rejected
1587 1593
1588 1594 def undelete(self, list):
1589 1595 pctxs = self.parents()
1590 1596 with self._repo.wlock():
1591 1597 ds = self._repo.dirstate
1592 1598 for f in list:
1593 1599 if self._repo.dirstate[f] != 'r':
1594 1600 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1595 1601 else:
1596 1602 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1597 1603 t = fctx.data()
1598 1604 self._repo.wwrite(f, t, fctx.flags())
1599 1605 self._repo.dirstate.normal(f)
1600 1606
1601 1607 def copy(self, source, dest):
1602 1608 try:
1603 1609 st = self._repo.wvfs.lstat(dest)
1604 1610 except OSError as err:
1605 1611 if err.errno != errno.ENOENT:
1606 1612 raise
1607 1613 self._repo.ui.warn(_("%s does not exist!\n")
1608 1614 % self._repo.dirstate.pathto(dest))
1609 1615 return
1610 1616 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1611 1617 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1612 1618 "symbolic link\n")
1613 1619 % self._repo.dirstate.pathto(dest))
1614 1620 else:
1615 1621 with self._repo.wlock():
1616 1622 if self._repo.dirstate[dest] in '?':
1617 1623 self._repo.dirstate.add(dest)
1618 1624 elif self._repo.dirstate[dest] in 'r':
1619 1625 self._repo.dirstate.normallookup(dest)
1620 1626 self._repo.dirstate.copy(source, dest)
1621 1627
1622 1628 def match(self, pats=None, include=None, exclude=None, default='glob',
1623 1629 listsubrepos=False, badfn=None):
1624 1630 r = self._repo
1625 1631
1626 1632 # Only a case-insensitive filesystem needs magic to translate user input
1627 1633 # to actual case in the filesystem.
1628 1634 icasefs = not util.fscasesensitive(r.root)
1629 1635 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1630 1636 default, auditor=r.auditor, ctx=self,
1631 1637 listsubrepos=listsubrepos, badfn=badfn,
1632 1638 icasefs=icasefs)
1633 1639
1634 1640 def _filtersuspectsymlink(self, files):
1635 1641 if not files or self._repo.dirstate._checklink:
1636 1642 return files
1637 1643
1638 1644 # Symlink placeholders may get non-symlink-like contents
1639 1645 # via user error or dereferencing by NFS or Samba servers,
1640 1646 # so we filter out any placeholders that don't look like a
1641 1647 # symlink
1642 1648 sane = []
1643 1649 for f in files:
1644 1650 if self.flags(f) == 'l':
1645 1651 d = self[f].data()
1646 1652 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1647 1653 self._repo.ui.debug('ignoring suspect symlink placeholder'
1648 1654 ' "%s"\n' % f)
1649 1655 continue
1650 1656 sane.append(f)
1651 1657 return sane
1652 1658
1653 1659 def _checklookup(self, files):
1654 1660 # check for any possibly clean files
1655 1661 if not files:
1656 1662 return [], [], []
1657 1663
1658 1664 modified = []
1659 1665 deleted = []
1660 1666 fixup = []
1661 1667 pctx = self._parents[0]
1662 1668 # do a full compare of any files that might have changed
1663 1669 for f in sorted(files):
1664 1670 try:
1665 1671 # This will return True for a file that got replaced by a
1666 1672 # directory in the interim, but fixing that is pretty hard.
1667 1673 if (f not in pctx or self.flags(f) != pctx.flags(f)
1668 1674 or pctx[f].cmp(self[f])):
1669 1675 modified.append(f)
1670 1676 else:
1671 1677 fixup.append(f)
1672 1678 except (IOError, OSError):
1673 1679 # A file became inaccessible in between? Mark it as deleted,
1674 1680 # matching dirstate behavior (issue5584).
1675 1681 # The dirstate has more complex behavior around whether a
1676 1682 # missing file matches a directory, etc, but we don't need to
1677 1683 # bother with that: if f has made it to this point, we're sure
1678 1684 # it's in the dirstate.
1679 1685 deleted.append(f)
1680 1686
1681 1687 return modified, deleted, fixup
1682 1688
1683 1689 def _poststatusfixup(self, status, fixup):
1684 1690 """update dirstate for files that are actually clean"""
1685 1691 poststatus = self._repo.postdsstatus()
1686 1692 if fixup or poststatus:
1687 1693 try:
1688 1694 oldid = self._repo.dirstate.identity()
1689 1695
1690 1696 # updating the dirstate is optional
1691 1697 # so we don't wait on the lock
1692 1698 # wlock can invalidate the dirstate, so cache normal _after_
1693 1699 # taking the lock
1694 1700 with self._repo.wlock(False):
1695 1701 if self._repo.dirstate.identity() == oldid:
1696 1702 if fixup:
1697 1703 normal = self._repo.dirstate.normal
1698 1704 for f in fixup:
1699 1705 normal(f)
1700 1706 # write changes out explicitly, because nesting
1701 1707 # wlock at runtime may prevent 'wlock.release()'
1702 1708 # after this block from writing out changes for
1703 1709 # files changed later
1704 1710 tr = self._repo.currenttransaction()
1705 1711 self._repo.dirstate.write(tr)
1706 1712
1707 1713 if poststatus:
1708 1714 for ps in poststatus:
1709 1715 ps(self, status)
1710 1716 else:
1711 1717 # in this case, writing changes out breaks
1712 1718 # consistency, because .hg/dirstate was
1713 1719 # already changed by another process since it
1714 1720 # was last read (see also issue5584 for details)
1715 1721 self._repo.ui.debug('skip updating dirstate: '
1716 1722 'identity mismatch\n')
1717 1723 except error.LockError:
1718 1724 pass
1719 1725 finally:
1720 1726 # Even if the wlock couldn't be grabbed, clear out the list.
1721 1727 self._repo.clearpostdsstatus()
1722 1728
1723 1729 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1724 1730 unknown=False):
1725 1731 '''Gets the status from the dirstate -- internal use only.'''
1726 1732 listignored, listclean, listunknown = ignored, clean, unknown
1727 1733 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1728 1734 subrepos = []
1729 1735 if '.hgsub' in self:
1730 1736 subrepos = sorted(self.substate)
1731 1737 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1732 1738 listclean, listunknown)
1733 1739
1734 1740 # check for any possibly clean files
1735 1741 fixup = []
1736 1742 if cmp:
1737 1743 modified2, deleted2, fixup = self._checklookup(cmp)
1738 1744 s.modified.extend(modified2)
1739 1745 s.deleted.extend(deleted2)
1740 1746
1741 1747 if fixup and listclean:
1742 1748 s.clean.extend(fixup)
1743 1749
1744 1750 self._poststatusfixup(s, fixup)
1745 1751
1746 1752 if match.always():
1747 1753 # cache for performance
1748 1754 if s.unknown or s.ignored or s.clean:
1749 1755 # "_status" is cached with list*=False in the normal route
1750 1756 self._status = scmutil.status(s.modified, s.added, s.removed,
1751 1757 s.deleted, [], [], [])
1752 1758 else:
1753 1759 self._status = s
1754 1760
1755 1761 return s
1756 1762
1757 1763 @propertycache
1758 1764 def _manifest(self):
1759 1765 """generate a manifest corresponding to the values in self._status
1760 1766
1761 1767 This reuses the file nodeids from the parent, but uses special node
1762 1768 identifiers for added and modified files. This is used by manifest
1763 1769 merge to see that files are different and by the update logic to avoid
1764 1770 deleting newly added files.
1765 1771 """
1766 1772 return self._buildstatusmanifest(self._status)
1767 1773
1768 1774 def _buildstatusmanifest(self, status):
1769 1775 """Builds a manifest that includes the given status results."""
1770 1776 parents = self.parents()
1771 1777
1772 1778 man = parents[0].manifest().copy()
1773 1779
1774 1780 ff = self._flagfunc
1775 1781 for i, l in ((addednodeid, status.added),
1776 1782 (modifiednodeid, status.modified)):
1777 1783 for f in l:
1778 1784 man[f] = i
1779 1785 try:
1780 1786 man.setflag(f, ff(f))
1781 1787 except OSError:
1782 1788 pass
1783 1789
1784 1790 for f in status.deleted + status.removed:
1785 1791 if f in man:
1786 1792 del man[f]
1787 1793
1788 1794 return man
1789 1795
1790 1796 def _buildstatus(self, other, s, match, listignored, listclean,
1791 1797 listunknown):
1792 1798 """build a status with respect to another context
1793 1799
1794 1800 This includes logic for maintaining the fast path of status when
1795 1801 comparing the working directory against its parent, which is to skip
1796 1802 building a new manifest if self (working directory) is not comparing
1797 1803 against its parent (repo['.']).
1798 1804 """
1799 1805 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1800 1806 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1801 1807 # might have accidentally ended up with the entire contents of the file
1802 1808 # they are supposed to be linking to.
1803 1809 s.modified[:] = self._filtersuspectsymlink(s.modified)
1804 1810 if other != self._repo['.']:
1805 1811 s = super(workingctx, self)._buildstatus(other, s, match,
1806 1812 listignored, listclean,
1807 1813 listunknown)
1808 1814 return s
1809 1815
1810 1816 def _matchstatus(self, other, match):
1811 1817 """override the match method with a filter for directory patterns
1812 1818
1813 1819 We use inheritance to customize the match.bad method only in the case
1814 1820 of workingctx, since it applies only to the working directory when
1815 1821 comparing against the parent changeset.
1816 1822
1817 1823 If we aren't comparing against the working directory's parent, then we
1818 1824 just use the default match object sent to us.
1819 1825 """
1820 1826 superself = super(workingctx, self)
1821 1827 match = superself._matchstatus(other, match)
1822 1828 if other != self._repo['.']:
1823 1829 def bad(f, msg):
1824 1830 # 'f' may be a directory pattern from 'match.files()',
1825 1831 # so 'f not in ctx1' is not enough
1826 1832 if f not in other and not other.hasdir(f):
1827 1833 self._repo.ui.warn('%s: %s\n' %
1828 1834 (self._repo.dirstate.pathto(f), msg))
1829 1835 match.bad = bad
1830 1836 return match
1831 1837
1832 1838 def markcommitted(self, node):
1833 1839 super(workingctx, self).markcommitted(node)
1834 1840
1835 1841 sparse.aftercommit(self._repo, node)
1836 1842
1837 1843 class committablefilectx(basefilectx):
1838 1844 """A committablefilectx provides common functionality for a file context
1839 1845 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1840 1846 def __init__(self, repo, path, filelog=None, ctx=None):
1841 1847 self._repo = repo
1842 1848 self._path = path
1843 1849 self._changeid = None
1844 1850 self._filerev = self._filenode = None
1845 1851
1846 1852 if filelog is not None:
1847 1853 self._filelog = filelog
1848 1854 if ctx:
1849 1855 self._changectx = ctx
1850 1856
1851 1857 def __nonzero__(self):
1852 1858 return True
1853 1859
1854 1860 __bool__ = __nonzero__
1855 1861
1856 1862 def linkrev(self):
1857 1863 # linked to self._changectx no matter if file is modified or not
1858 1864 return self.rev()
1859 1865
1860 1866 def parents(self):
1861 1867 '''return parent filectxs, following copies if necessary'''
1862 1868 def filenode(ctx, path):
1863 1869 return ctx._manifest.get(path, nullid)
1864 1870
1865 1871 path = self._path
1866 1872 fl = self._filelog
1867 1873 pcl = self._changectx._parents
1868 1874 renamed = self.renamed()
1869 1875
1870 1876 if renamed:
1871 1877 pl = [renamed + (None,)]
1872 1878 else:
1873 1879 pl = [(path, filenode(pcl[0], path), fl)]
1874 1880
1875 1881 for pc in pcl[1:]:
1876 1882 pl.append((path, filenode(pc, path), fl))
1877 1883
1878 1884 return [self._parentfilectx(p, fileid=n, filelog=l)
1879 1885 for p, n, l in pl if n != nullid]
1880 1886
1881 1887 def children(self):
1882 1888 return []
1883 1889
1884 1890 class workingfilectx(committablefilectx):
1885 1891 """A workingfilectx object makes access to data related to a particular
1886 1892 file in the working directory convenient."""
1887 1893 def __init__(self, repo, path, filelog=None, workingctx=None):
1888 1894 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1889 1895
1890 1896 @propertycache
1891 1897 def _changectx(self):
1892 1898 return workingctx(self._repo)
1893 1899
1894 1900 def data(self):
1895 1901 return self._repo.wread(self._path)
1896 1902 def renamed(self):
1897 1903 rp = self._repo.dirstate.copied(self._path)
1898 1904 if not rp:
1899 1905 return None
1900 1906 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1901 1907
1902 1908 def size(self):
1903 1909 return self._repo.wvfs.lstat(self._path).st_size
1904 1910 def date(self):
1905 1911 t, tz = self._changectx.date()
1906 1912 try:
1907 1913 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1908 1914 except OSError as err:
1909 1915 if err.errno != errno.ENOENT:
1910 1916 raise
1911 1917 return (t, tz)
1912 1918
1913 1919 def exists(self):
1914 1920 return self._repo.wvfs.exists(self._path)
1915 1921
1916 1922 def lexists(self):
1917 1923 return self._repo.wvfs.lexists(self._path)
1918 1924
1919 1925 def audit(self):
1920 1926 return self._repo.wvfs.audit(self._path)
1921 1927
1922 1928 def cmp(self, fctx):
1923 1929 """compare with other file context
1924 1930
1925 1931 returns True if different than fctx.
1926 1932 """
1927 1933 # fctx should be a filectx (not a workingfilectx)
1928 1934 # invert comparison to reuse the same code path
1929 1935 return fctx.cmp(self)
1930 1936
1931 1937 def remove(self, ignoremissing=False):
1932 1938 """wraps unlink for a repo's working directory"""
1933 1939 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1934 1940
1935 1941 def write(self, data, flags, backgroundclose=False):
1936 1942 """wraps repo.wwrite"""
1937 1943 self._repo.wwrite(self._path, data, flags,
1938 1944 backgroundclose=backgroundclose)
1939 1945
1940 1946 def setflags(self, l, x):
1941 1947 self._repo.wvfs.setflags(self._path, l, x)
1942 1948
1943 1949 class workingcommitctx(workingctx):
1944 1950 """A workingcommitctx object makes access to data related to
1945 1951 the revision being committed convenient.
1946 1952
1947 1953 This hides changes in the working directory, if they aren't
1948 1954 committed in this context.
1949 1955 """
1950 1956 def __init__(self, repo, changes,
1951 1957 text="", user=None, date=None, extra=None):
1952 1958 super(workingctx, self).__init__(repo, text, user, date, extra,
1953 1959 changes)
1954 1960
1955 1961 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1956 1962 unknown=False):
1957 1963 """Return matched files only in ``self._status``
1958 1964
1959 1965 Uncommitted files appear "clean" via this context, even if
1960 1966 they aren't actually so in the working directory.
1961 1967 """
1962 1968 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1963 1969 if clean:
1964 1970 clean = [f for f in self._manifest if f not in self._changedset]
1965 1971 else:
1966 1972 clean = []
1967 1973 return scmutil.status([f for f in self._status.modified if match(f)],
1968 1974 [f for f in self._status.added if match(f)],
1969 1975 [f for f in self._status.removed if match(f)],
1970 1976 [], [], [], clean)
1971 1977
1972 1978 @propertycache
1973 1979 def _changedset(self):
1974 1980 """Return the set of files changed in this context
1975 1981 """
1976 1982 changed = set(self._status.modified)
1977 1983 changed.update(self._status.added)
1978 1984 changed.update(self._status.removed)
1979 1985 return changed
1980 1986
1981 1987 def makecachingfilectxfn(func):
1982 1988 """Create a filectxfn that caches based on the path.
1983 1989
1984 1990 We can't use util.cachefunc because it uses all arguments as the cache
1985 1991 key and this creates a cycle since the arguments include the repo and
1986 1992 memctx.
1987 1993 """
1988 1994 cache = {}
1989 1995
1990 1996 def getfilectx(repo, memctx, path):
1991 1997 if path not in cache:
1992 1998 cache[path] = func(repo, memctx, path)
1993 1999 return cache[path]
1994 2000
1995 2001 return getfilectx
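# Illustrative sketch (not part of the original file): wrapping an expensive
# filectxfn so that repeated lookups of the same path are served from the
# cache ('loadcontent' is a hypothetical helper). Note that memctx already
# applies this wrapper to its filectxfn internally.
#
#   def expensivefilectx(repo, memctx, path):
#       return memfilectx(repo, path, loadcontent(path), memctx=memctx)
#
#   getfilectx = makecachingfilectxfn(expensivefilectx)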
1996 2002
1997 2003 def memfilefromctx(ctx):
1998 2004 """Given a context return a memfilectx for ctx[path]
1999 2005
2000 2006 This is a convenience method for building a memctx based on another
2001 2007 context.
2002 2008 """
2003 2009 def getfilectx(repo, memctx, path):
2004 2010 fctx = ctx[path]
2005 2011 # this is weird but apparently we only keep track of one parent
2006 2012 # (why not only store that instead of a tuple?)
2007 2013 copied = fctx.renamed()
2008 2014 if copied:
2009 2015 copied = copied[0]
2010 2016 return memfilectx(repo, path, fctx.data(),
2011 2017 islink=fctx.islink(), isexec=fctx.isexec(),
2012 2018 copied=copied, memctx=memctx)
2013 2019
2014 2020 return getfilectx
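# Illustrative sketch (not part of the original file): rebuilding a changeset
# from an existing context, assuming 'repo' and 'ctx' already exist. memctx
# also accepts a non-callable context directly and wraps it with this helper
# itself.
#
#   getfilectx = memfilefromctx(ctx)
#   new = memctx(repo, [ctx.p1().node(), ctx.p2().node()], ctx.description(),
#                ctx.files(), getfilectx, user=ctx.user(), date=ctx.date())
#   newnode = new.commit()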
2015 2021
2016 2022 def memfilefrompatch(patchstore):
2017 2023 """Given a patch (e.g. patchstore object) return a memfilectx
2018 2024
2019 2025 This is a convenience method for building a memctx based on a patchstore.
2020 2026 """
2021 2027 def getfilectx(repo, memctx, path):
2022 2028 data, mode, copied = patchstore.getfile(path)
2023 2029 if data is None:
2024 2030 return None
2025 2031 islink, isexec = mode
2026 2032 return memfilectx(repo, path, data, islink=islink,
2027 2033 isexec=isexec, copied=copied,
2028 2034 memctx=memctx)
2029 2035
2030 2036 return getfilectx
2031 2037
2032 2038 class memctx(committablectx):
2033 2039 """Use memctx to perform in-memory commits via localrepo.commitctx().
2034 2040
2035 2041 Revision information is supplied at initialization time, while
2036 2042 related file data is made available through a callback
2037 2043 mechanism. 'repo' is the current localrepo, 'parents' is a
2038 2044 sequence of two parent revision identifiers (pass None for every
2039 2045 missing parent), 'text' is the commit message and 'files' lists
2040 2046 names of files touched by the revision (normalized and relative to
2041 2047 repository root).
2042 2048
2043 2049 filectxfn(repo, memctx, path) is a callable receiving the
2044 2050 repository, the current memctx object and the normalized path of
2045 2051 requested file, relative to repository root. It is fired by the
2046 2052 commit function for every file in 'files', but the call order is
2047 2053 undefined. If the file is available in the revision being
2048 2054 committed (updated or added), filectxfn returns a memfilectx
2049 2055 object. If the file was removed, filectxfn returns None in recent
2050 2056 Mercurial. Moved files are represented by marking the source file
2051 2057 removed and the new file added with copy information (see
2052 2058 memfilectx).
2053 2059
2054 2060 'user' is the committer name and defaults to the current
2055 2061 repository username, 'date' is the commit date in any format
2056 2062 supported by util.parsedate() and defaults to the current date, and
2057 2063 'extra' is a dictionary of metadata or is left empty.
2058 2064 """
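# Illustrative sketch (not part of the original file): creating a commit
# entirely in memory, assuming 'repo' is an open localrepo; 'hello.txt' and
# its content are hypothetical.
#
#   def getfilectx(repo, memctx, path):
#       # returning None here would mark 'path' as removed
#       return memfilectx(repo, path, 'hello, world\n', memctx=memctx)
#
#   mctx = memctx(repo, parents=(repo['.'].node(), None),
#                 text='example commit', files=['hello.txt'],
#                 filectxfn=getfilectx, user='Example <user@example.org>')
#   newnode = mctx.commit()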
2059 2065
2060 2066 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2061 2067 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2062 2068 # this field to determine what to do in filectxfn.
2063 2069 _returnnoneformissingfiles = True
2064 2070
2065 2071 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2066 2072 date=None, extra=None, branch=None, editor=False):
2067 2073 super(memctx, self).__init__(repo, text, user, date, extra)
2068 2074 self._rev = None
2069 2075 self._node = None
2070 2076 parents = [(p or nullid) for p in parents]
2071 2077 p1, p2 = parents
2072 2078 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2073 2079 files = sorted(set(files))
2074 2080 self._files = files
2075 2081 if branch is not None:
2076 2082 self._extra['branch'] = encoding.fromlocal(branch)
2077 2083 self.substate = {}
2078 2084
2079 2085 if isinstance(filectxfn, patch.filestore):
2080 2086 filectxfn = memfilefrompatch(filectxfn)
2081 2087 elif not callable(filectxfn):
2082 2088 # if store is not callable, wrap it in a function
2083 2089 filectxfn = memfilefromctx(filectxfn)
2084 2090
2085 2091 # memoizing increases performance for e.g. vcs convert scenarios.
2086 2092 self._filectxfn = makecachingfilectxfn(filectxfn)
2087 2093
2088 2094 if editor:
2089 2095 self._text = editor(self._repo, self, [])
2090 2096 self._repo.savecommitmessage(self._text)
2091 2097
2092 2098 def filectx(self, path, filelog=None):
2093 2099 """get a file context for the given path in this in-memory commit
2094 2100
2095 2101 Returns None if the file doesn't exist and should be removed."""
2096 2102 return self._filectxfn(self._repo, self, path)
2097 2103
2098 2104 def commit(self):
2099 2105 """commit context to the repo"""
2100 2106 return self._repo.commitctx(self)
2101 2107
2102 2108 @propertycache
2103 2109 def _manifest(self):
2104 2110 """generate a manifest based on the return values of filectxfn"""
2105 2111
2106 2112 # keep this simple for now; just worry about p1
2107 2113 pctx = self._parents[0]
2108 2114 man = pctx.manifest().copy()
2109 2115
2110 2116 for f in self._status.modified:
2111 2117 p1node = nullid
2112 2118 p2node = nullid
2113 2119 p = pctx[f].parents() # if file isn't in pctx, check p2?
2114 2120 if len(p) > 0:
2115 2121 p1node = p[0].filenode()
2116 2122 if len(p) > 1:
2117 2123 p2node = p[1].filenode()
2118 2124 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2119 2125
2120 2126 for f in self._status.added:
2121 2127 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2122 2128
2123 2129 for f in self._status.removed:
2124 2130 if f in man:
2125 2131 del man[f]
2126 2132
2127 2133 return man
2128 2134
2129 2135 @propertycache
2130 2136 def _status(self):
2131 2137 """Calculate exact status from ``files`` specified at construction
2132 2138 """
2133 2139 man1 = self.p1().manifest()
2134 2140 p2 = self._parents[1]
2135 2141 # "1 < len(self._parents)" can't be used to check for the
2136 2142 # existence of the 2nd parent, because "memctx._parents" is
2137 2143 # explicitly initialized with a list whose length is always 2.
2138 2144 if p2.node() != nullid:
2139 2145 man2 = p2.manifest()
2140 2146 managing = lambda f: f in man1 or f in man2
2141 2147 else:
2142 2148 managing = lambda f: f in man1
2143 2149
2144 2150 modified, added, removed = [], [], []
2145 2151 for f in self._files:
2146 2152 if not managing(f):
2147 2153 added.append(f)
2148 2154 elif self[f]:
2149 2155 modified.append(f)
2150 2156 else:
2151 2157 removed.append(f)
2152 2158
2153 2159 return scmutil.status(modified, added, removed, [], [], [], [])
2154 2160
2155 2161 class memfilectx(committablefilectx):
2156 2162 """memfilectx represents an in-memory file to commit.
2157 2163
2158 2164 See memctx and committablefilectx for more details.
2159 2165 """
2160 2166 def __init__(self, repo, path, data, islink=False,
2161 2167 isexec=False, copied=None, memctx=None):
2162 2168 """
2163 2169 path is the normalized file path relative to repository root.
2164 2170 data is the file content as a string.
2165 2171 islink is True if the file is a symbolic link.
2166 2172 isexec is True if the file is executable.
2167 2173 copied is the source file path if current file was copied in the
2168 2174 revision being committed, or None."""
2169 2175 super(memfilectx, self).__init__(repo, path, None, memctx)
2170 2176 self._data = data
2171 2177 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2172 2178 self._copied = None
2173 2179 if copied:
2174 2180 self._copied = (copied, nullid)
2175 2181
2176 2182 def data(self):
2177 2183 return self._data
2178 2184
2179 2185 def remove(self, ignoremissing=False):
2180 2186 """wraps unlink for a repo's working directory"""
2181 2187 # need to figure out what to do here
2182 2188 del self._changectx[self._path]
2183 2189
2184 2190 def write(self, data, flags):
2185 2191 """wraps repo.wwrite"""
2186 2192 self._data = data
2187 2193
2188 2194 class overlayfilectx(committablefilectx):
2189 2195 """Like memfilectx, but takes an original filectx and optional parameters to
2190 2196 override parts of it. This is useful when fctx.data() is expensive (e.g.
2191 2197 the flag processor is expensive) and the raw data, flags, and filenode can
2192 2198 be reused (e.g. a rebase or a mode-only amend of a REVIDX_EXTSTORED file).
2193 2199 """
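# Illustrative sketch (not part of the original file): reusing the data of an
# existing filectx while overriding only its flags, e.g. making a file
# executable without re-reading or re-hashing its content ('somefile' is a
# hypothetical path).
#
#   fctx = repo['.']['somefile']
#   execfctx = overlayfilectx(fctx, flags='x')
#   assert execfctx.data() == fctx.data()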
2194 2200
2195 2201 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2196 2202 copied=None, ctx=None):
2197 2203 """originalfctx: filecontext to duplicate
2198 2204
2199 2205 datafunc: None or a function to override data (file content); a function
2200 2206 is used so evaluation can be lazy. path, flags, copied, ctx: None or an overriding value
2201 2207
2202 2208 copied could be (path, rev), or False. copied could also be just path,
2203 2209 and will be converted to (path, nullid). This simplifies some callers.
2204 2210 """
2205 2211
2206 2212 if path is None:
2207 2213 path = originalfctx.path()
2208 2214 if ctx is None:
2209 2215 ctx = originalfctx.changectx()
2210 2216 ctxmatch = lambda: True
2211 2217 else:
2212 2218 ctxmatch = lambda: ctx == originalfctx.changectx()
2213 2219
2214 2220 repo = originalfctx.repo()
2215 2221 flog = originalfctx.filelog()
2216 2222 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2217 2223
2218 2224 if copied is None:
2219 2225 copied = originalfctx.renamed()
2220 2226 copiedmatch = lambda: True
2221 2227 else:
2222 2228 if copied and not isinstance(copied, tuple):
2223 2229 # repo._filecommit will recalculate copyrev so nullid is okay
2224 2230 copied = (copied, nullid)
2225 2231 copiedmatch = lambda: copied == originalfctx.renamed()
2226 2232
2227 2233 # When data, copied (could affect data), ctx (could affect filelog
2228 2234 # parents) are not overridden, rawdata, rawflags, and filenode may be
2229 2235 # reused (repo._filecommit should double check filelog parents).
2230 2236 #
2231 2237 # path and flags are not hashed in the filelog (but in the manifestlog),
2232 2238 # so they do not affect reusability here.
2233 2239 #
2234 2240 # If ctx or copied is overridden to the same value as in originalfctx,
2235 2241 # it is still considered reusable. originalfctx.renamed() may be a bit
2236 2242 # expensive, so it's not called unless necessary. Since datafunc is
2237 2243 # assumed to always be expensive, it is not called for this "reusable" test.
2238 2244 reusable = datafunc is None and ctxmatch() and copiedmatch()
2239 2245
2240 2246 if datafunc is None:
2241 2247 datafunc = originalfctx.data
2242 2248 if flags is None:
2243 2249 flags = originalfctx.flags()
2244 2250
2245 2251 self._datafunc = datafunc
2246 2252 self._flags = flags
2247 2253 self._copied = copied
2248 2254
2249 2255 if reusable:
2250 2256 # copy extra fields from originalfctx
2251 2257 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2252 2258 for attr in attrs:
2253 2259 if util.safehasattr(originalfctx, attr):
2254 2260 setattr(self, attr, getattr(originalfctx, attr))
2255 2261
2256 2262 def data(self):
2257 2263 return self._datafunc()
2258 2264
2259 2265 class metadataonlyctx(committablectx):
2260 2266 """Like memctx, but it reuses the manifest of a different commit.
2261 2267 Intended to be used by lightweight operations that create
2262 2268 metadata-only changes.
2263 2269
2264 2270 Revision information is supplied at initialization time. 'repo' is the
2265 2271 current localrepo, 'ctx' is the original revision whose manifest we're
2266 2272 reusing, 'parents' is a sequence of two parent revision identifiers (pass
2267 2273 None for every missing parent), and 'text' is the commit message.
2268 2274
2269 2275 'user' is the committer name and defaults to the current repository
2270 2276 username, 'date' is the commit date in any format supported by
2271 2277 util.parsedate() and defaults to the current date, and 'extra' is a
2272 2278 dictionary of metadata or is left empty.
2273 2279 """
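# Illustrative sketch (not part of the original file): rewriting only the
# commit message of the working directory parent while reusing its manifest
# ('repo' is assumed to be an open localrepo).
#
#   old = repo['.']
#   new = metadataonlyctx(repo, old,
#                         parents=(old.p1().node(), old.p2().node()),
#                         text='reworded commit message', user=old.user(),
#                         date=old.date(), extra=old.extra())
#   newnode = new.commit()
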
2274 2280 def __new__(cls, repo, originalctx, *args, **kwargs):
2275 2281 return super(metadataonlyctx, cls).__new__(cls, repo)
2276 2282
2277 2283 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2278 2284 extra=None, editor=False):
2279 2285 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2280 2286 self._rev = None
2281 2287 self._node = None
2282 2288 self._originalctx = originalctx
2283 2289 self._manifestnode = originalctx.manifestnode()
2284 2290 parents = [(p or nullid) for p in parents]
2285 2291 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2286 2292
2287 2293 # sanity check to ensure that the reused manifest parents are
2288 2294 # manifests of our commit parents
2289 2295 mp1, mp2 = self.manifestctx().parents
2290 2296 if p1 != nullid and p1.manifestnode() != mp1:
2291 2297 raise RuntimeError('can\'t reuse the manifest: '
2292 2298 'its p1 doesn\'t match the new ctx p1')
2293 2299 if p2 != nullid and p2.manifestnode() != mp2:
2294 2300 raise RuntimeError('can\'t reuse the manifest: '
2295 2301 'its p2 doesn\'t match the new ctx p2')
2296 2302
2297 2303 self._files = originalctx.files()
2298 2304 self.substate = {}
2299 2305
2300 2306 if editor:
2301 2307 self._text = editor(self._repo, self, [])
2302 2308 self._repo.savecommitmessage(self._text)
2303 2309
2304 2310 def manifestnode(self):
2305 2311 return self._manifestnode
2306 2312
2307 2313 @property
2308 2314 def _manifestctx(self):
2309 2315 return self._repo.manifestlog[self._manifestnode]
2310 2316
2311 2317 def filectx(self, path, filelog=None):
2312 2318 return self._originalctx.filectx(path, filelog=filelog)
2313 2319
2314 2320 def commit(self):
2315 2321 """commit context to the repo"""
2316 2322 return self._repo.commitctx(self)
2317 2323
2318 2324 @property
2319 2325 def _manifest(self):
2320 2326 return self._originalctx.manifest()
2321 2327
2322 2328 @propertycache
2323 2329 def _status(self):
2324 2330 """Calculate exact status from ``files`` specified in the ``originalctx``
2325 2331 and the parent manifests.
2326 2332 """
2327 2333 man1 = self.p1().manifest()
2328 2334 p2 = self._parents[1]
2329 2335 # "1 < len(self._parents)" can't be used to check for the
2330 2336 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2331 2337 # explicitly initialized with a list whose length is always 2.
2332 2338 if p2.node() != nullid:
2333 2339 man2 = p2.manifest()
2334 2340 managing = lambda f: f in man1 or f in man2
2335 2341 else:
2336 2342 managing = lambda f: f in man1
2337 2343
2338 2344 modified, added, removed = [], [], []
2339 2345 for f in self._files:
2340 2346 if not managing(f):
2341 2347 added.append(f)
2342 2348 elif self[f]:
2343 2349 modified.append(f)
2344 2350 else:
2345 2351 removed.append(f)
2346 2352
2347 2353 return scmutil.status(modified, added, removed, [], [], [], [])