filectx: add an overlayfilectx class...
Jun Wu - r32239:07da778f default
@@ -1,2177 +1,2250 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 )
27 27 from . import (
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 mdiff,
33 33 obsolete as obsmod,
34 34 patch,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 revlog,
39 39 scmutil,
40 40 subrepo,
41 41 util,
42 42 )
43 43
44 44 propertycache = util.propertycache
45 45
46 46 nonascii = re.compile(r'[^\x21-\x7f]').search
47 47
48 48 class basectx(object):
49 49 """A basectx object represents the common logic for its children:
50 50 changectx: read-only context that is already present in the repo,
51 51 workingctx: a context that represents the working directory and can
52 52 be committed,
53 53 memctx: a context that represents changes in-memory and can also
54 54 be committed."""
55 55 def __new__(cls, repo, changeid='', *args, **kwargs):
56 56 if isinstance(changeid, basectx):
57 57 return changeid
58 58
59 59 o = super(basectx, cls).__new__(cls)
60 60
61 61 o._repo = repo
62 62 o._rev = nullrev
63 63 o._node = nullid
64 64
65 65 return o
66 66
67 67 def __str__(self):
68 68 r = short(self.node())
69 69 if pycompat.ispy3:
70 70 return r.decode('ascii')
71 71 return r
72 72
73 73 def __bytes__(self):
74 74 return short(self.node())
75 75
76 76 def __int__(self):
77 77 return self.rev()
78 78
79 79 def __repr__(self):
80 80 return "<%s %s>" % (type(self).__name__, str(self))
81 81
82 82 def __eq__(self, other):
83 83 try:
84 84 return type(self) == type(other) and self._rev == other._rev
85 85 except AttributeError:
86 86 return False
87 87
88 88 def __ne__(self, other):
89 89 return not (self == other)
90 90
91 91 def __contains__(self, key):
92 92 return key in self._manifest
93 93
94 94 def __getitem__(self, key):
95 95 return self.filectx(key)
96 96
97 97 def __iter__(self):
98 98 return iter(self._manifest)
99 99
100 100 def _buildstatusmanifest(self, status):
101 101 """Builds a manifest that includes the given status results, if this is
102 102 a working copy context. For non-working copy contexts, it just returns
103 103 the normal manifest."""
104 104 return self.manifest()
105 105
106 106 def _matchstatus(self, other, match):
107 107 """return match.always if match is None
108 108
109 109 This internal method provides a way for child objects to override the
110 110 match operator.
111 111 """
112 112 return match or matchmod.always(self._repo.root, self._repo.getcwd())
113 113
114 114 def _buildstatus(self, other, s, match, listignored, listclean,
115 115 listunknown):
116 116 """build a status with respect to another context"""
117 117 # Load earliest manifest first for caching reasons. More specifically,
118 118 # if you have revisions 1000 and 1001, 1001 is probably stored as a
119 119 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
120 120 # 1000 and cache it so that when you read 1001, we just need to apply a
121 121 # delta to what's in the cache. So that's one full reconstruction + one
122 122 # delta application.
123 123 mf2 = None
124 124 if self.rev() is not None and self.rev() < other.rev():
125 125 mf2 = self._buildstatusmanifest(s)
126 126 mf1 = other._buildstatusmanifest(s)
127 127 if mf2 is None:
128 128 mf2 = self._buildstatusmanifest(s)
129 129
130 130 modified, added = [], []
131 131 removed = []
132 132 clean = []
133 133 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
134 134 deletedset = set(deleted)
135 135 d = mf1.diff(mf2, match=match, clean=listclean)
136 136 for fn, value in d.iteritems():
137 137 if fn in deletedset:
138 138 continue
139 139 if value is None:
140 140 clean.append(fn)
141 141 continue
142 142 (node1, flag1), (node2, flag2) = value
143 143 if node1 is None:
144 144 added.append(fn)
145 145 elif node2 is None:
146 146 removed.append(fn)
147 147 elif flag1 != flag2:
148 148 modified.append(fn)
149 149 elif node2 not in wdirnodes:
150 150 # When comparing files between two commits, we save time by
151 151 # not comparing the file contents when the nodeids differ.
152 152 # Note that this means we incorrectly report a reverted change
153 153 # to a file as a modification.
154 154 modified.append(fn)
155 155 elif self[fn].cmp(other[fn]):
156 156 modified.append(fn)
157 157 else:
158 158 clean.append(fn)
159 159
160 160 if removed:
161 161 # need to filter files if they are already reported as removed
162 162 unknown = [fn for fn in unknown if fn not in mf1 and
163 163 (not match or match(fn))]
164 164 ignored = [fn for fn in ignored if fn not in mf1 and
165 165 (not match or match(fn))]
166 166 # if they're deleted, don't report them as removed
167 167 removed = [fn for fn in removed if fn not in deletedset]
168 168
169 169 return scmutil.status(modified, added, removed, deleted, unknown,
170 170 ignored, clean)
171 171
172 172 @propertycache
173 173 def substate(self):
174 174 return subrepo.state(self, self._repo.ui)
175 175
176 176 def subrev(self, subpath):
177 177 return self.substate[subpath][1]
178 178
179 179 def rev(self):
180 180 return self._rev
181 181 def node(self):
182 182 return self._node
183 183 def hex(self):
184 184 return hex(self.node())
185 185 def manifest(self):
186 186 return self._manifest
187 187 def manifestctx(self):
188 188 return self._manifestctx
189 189 def repo(self):
190 190 return self._repo
191 191 def phasestr(self):
192 192 return phases.phasenames[self.phase()]
193 193 def mutable(self):
194 194 return self.phase() > phases.public
195 195
196 196 def getfileset(self, expr):
197 197 return fileset.getfileset(self, expr)
198 198
199 199 def obsolete(self):
200 200 """True if the changeset is obsolete"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
202 202
203 203 def extinct(self):
204 204 """True if the changeset is extinct"""
205 205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
206 206
207 207 def unstable(self):
208 208 """True if the changeset is not obsolete but its ancestors are"""
209 209 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
210 210
211 211 def bumped(self):
212 212 """True if the changeset tries to be a successor of a public changeset
213 213
214 214 Only non-public and non-obsolete changesets may be bumped.
215 215 """
216 216 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
217 217
218 218 def divergent(self):
219 219 """Is a successor of a changeset with multiple possible successor sets
220 220
221 221 Only non-public and non-obsolete changesets may be divergent.
222 222 """
223 223 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
224 224
225 225 def troubled(self):
226 226 """True if the changeset is either unstable, bumped or divergent"""
227 227 return self.unstable() or self.bumped() or self.divergent()
228 228
229 229 def troubles(self):
230 230 """return the list of troubles affecting this changeset.
231 231
232 232 Troubles are returned as strings. Possible values are:
233 233 - unstable,
234 234 - bumped,
235 235 - divergent.
236 236 """
237 237 troubles = []
238 238 if self.unstable():
239 239 troubles.append('unstable')
240 240 if self.bumped():
241 241 troubles.append('bumped')
242 242 if self.divergent():
243 243 troubles.append('divergent')
244 244 return troubles
245 245
246 246 def parents(self):
247 247 """return contexts for each parent changeset"""
248 248 return self._parents
249 249
250 250 def p1(self):
251 251 return self._parents[0]
252 252
253 253 def p2(self):
254 254 parents = self._parents
255 255 if len(parents) == 2:
256 256 return parents[1]
257 257 return changectx(self._repo, nullrev)
258 258
259 259 def _fileinfo(self, path):
260 260 if r'_manifest' in self.__dict__:
261 261 try:
262 262 return self._manifest[path], self._manifest.flags(path)
263 263 except KeyError:
264 264 raise error.ManifestLookupError(self._node, path,
265 265 _('not found in manifest'))
266 266 if r'_manifestdelta' in self.__dict__ or path in self.files():
267 267 if path in self._manifestdelta:
268 268 return (self._manifestdelta[path],
269 269 self._manifestdelta.flags(path))
270 270 mfl = self._repo.manifestlog
271 271 try:
272 272 node, flag = mfl[self._changeset.manifest].find(path)
273 273 except KeyError:
274 274 raise error.ManifestLookupError(self._node, path,
275 275 _('not found in manifest'))
276 276
277 277 return node, flag
278 278
279 279 def filenode(self, path):
280 280 return self._fileinfo(path)[0]
281 281
282 282 def flags(self, path):
283 283 try:
284 284 return self._fileinfo(path)[1]
285 285 except error.LookupError:
286 286 return ''
287 287
288 288 def sub(self, path, allowcreate=True):
289 289 '''return a subrepo for the stored revision of path, never wdir()'''
290 290 return subrepo.subrepo(self, path, allowcreate=allowcreate)
291 291
292 292 def nullsub(self, path, pctx):
293 293 return subrepo.nullsubrepo(self, path, pctx)
294 294
295 295 def workingsub(self, path):
296 296 '''return a subrepo for the stored revision, or wdir if this is a wdir
297 297 context.
298 298 '''
299 299 return subrepo.subrepo(self, path, allowwdir=True)
300 300
301 301 def match(self, pats=None, include=None, exclude=None, default='glob',
302 302 listsubrepos=False, badfn=None):
303 303 if pats is None:
304 304 pats = []
305 305 r = self._repo
306 306 return matchmod.match(r.root, r.getcwd(), pats,
307 307 include, exclude, default,
308 308 auditor=r.nofsauditor, ctx=self,
309 309 listsubrepos=listsubrepos, badfn=badfn)
310 310
311 311 def diff(self, ctx2=None, match=None, **opts):
312 312 """Returns a diff generator for the given contexts and matcher"""
313 313 if ctx2 is None:
314 314 ctx2 = self.p1()
315 315 if ctx2 is not None:
316 316 ctx2 = self._repo[ctx2]
317 317 diffopts = patch.diffopts(self._repo.ui, opts)
318 318 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
319 319
320 320 def dirs(self):
321 321 return self._manifest.dirs()
322 322
323 323 def hasdir(self, dir):
324 324 return self._manifest.hasdir(dir)
325 325
326 326 def dirty(self, missing=False, merge=True, branch=True):
327 327 return False
328 328
329 329 def status(self, other=None, match=None, listignored=False,
330 330 listclean=False, listunknown=False, listsubrepos=False):
331 331 """return status of files between two nodes or node and working
332 332 directory.
333 333
334 334 If other is None, compare this node with working directory.
335 335
336 336 returns (modified, added, removed, deleted, unknown, ignored, clean)
337 337 """
338 338
339 339 ctx1 = self
340 340 ctx2 = self._repo[other]
341 341
342 342 # This next code block is, admittedly, fragile logic that tests for
343 343 # reversing the contexts and wouldn't need to exist if it weren't for
344 344 # the fast (and common) code path of comparing the working directory
345 345 # with its first parent.
346 346 #
347 347 # What we're aiming for here is the ability to call:
348 348 #
349 349 # workingctx.status(parentctx)
350 350 #
351 351 # If we always built the manifest for each context and compared those,
352 352 # then we'd be done. But the special case of the above call means we
353 353 # just copy the manifest of the parent.
354 354 reversed = False
355 355 if (not isinstance(ctx1, changectx)
356 356 and isinstance(ctx2, changectx)):
357 357 reversed = True
358 358 ctx1, ctx2 = ctx2, ctx1
359 359
360 360 match = ctx2._matchstatus(ctx1, match)
361 361 r = scmutil.status([], [], [], [], [], [], [])
362 362 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
363 363 listunknown)
364 364
365 365 if reversed:
366 366 # Reverse added and removed. Clear deleted, unknown and ignored as
367 367 # these make no sense to reverse.
368 368 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
369 369 r.clean)
370 370
371 371 if listsubrepos:
372 372 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
373 373 try:
374 374 rev2 = ctx2.subrev(subpath)
375 375 except KeyError:
376 376 # A subrepo that existed in node1 was deleted between
377 377 # node1 and node2 (inclusive). Thus, ctx2's substate
378 378 # won't contain that subpath. The best we can do is ignore it.
379 379 rev2 = None
380 380 submatch = matchmod.subdirmatcher(subpath, match)
381 381 s = sub.status(rev2, match=submatch, ignored=listignored,
382 382 clean=listclean, unknown=listunknown,
383 383 listsubrepos=True)
384 384 for rfiles, sfiles in zip(r, s):
385 385 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
386 386
387 387 for l in r:
388 388 l.sort()
389 389
390 390 return r
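
An editorial usage sketch (not part of this change): the status API above is typically driven as follows. It assumes `repo` is a localrepository opened elsewhere (e.g. via mercurial.hg.repository); the revision names are only examples.

    ctx = repo['tip']
    st = ctx.p1().status(ctx)        # changes introduced by 'tip' vs. its first parent
    touched = st.modified + st.added + st.removed
    wst = ctx.status()               # other=None: compare 'tip' with the working directory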
391 391
392 392
393 393 def makememctx(repo, parents, text, user, date, branch, files, store,
394 394 editor=None, extra=None):
395 395 def getfilectx(repo, memctx, path):
396 396 data, mode, copied = store.getfile(path)
397 397 if data is None:
398 398 return None
399 399 islink, isexec = mode
400 400 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
401 401 copied=copied, memctx=memctx)
402 402 if extra is None:
403 403 extra = {}
404 404 if branch:
405 405 extra['branch'] = encoding.fromlocal(branch)
406 406 ctx = memctx(repo, parents, text, files, getfilectx, user,
407 407 date, extra, editor)
408 408 return ctx
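
As the inner getfilectx above shows, the `store` argument only needs a getfile(path) method returning (data, (islink, isexec), copied), with data=None meaning the file is absent. A minimal sketch of such a store (hypothetical helper, not part of this change):

    class simplestore(object):
        """Map path -> data; every entry is a plain, non-copied file."""
        def __init__(self, files):
            self._files = files
        def getfile(self, path):
            # matches the (data, mode, copied) unpacking in getfilectx above
            return self._files[path], (False, False), None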
409 409
410 410 def _filterederror(repo, changeid):
411 411 """build an exception to be raised about a filtered changeid
412 412
413 413 This is extracted into a function to help extensions (e.g. evolve)
414 414 experiment with various message variants."""
415 415 if repo.filtername.startswith('visible'):
416 416 msg = _("hidden revision '%s'") % changeid
417 417 hint = _('use --hidden to access hidden revisions')
418 418 return error.FilteredRepoLookupError(msg, hint=hint)
419 419 msg = _("filtered revision '%s' (not in '%s' subset)")
420 420 msg %= (changeid, repo.filtername)
421 421 return error.FilteredRepoLookupError(msg)
422 422
423 423 class changectx(basectx):
424 424 """A changecontext object makes access to data related to a particular
425 425 changeset convenient. It represents a read-only context already present in
426 426 the repo."""
427 427 def __init__(self, repo, changeid=''):
428 428 """changeid is a revision number, node, or tag"""
429 429
430 430 # since basectx.__new__ already took care of copying the object, we
431 431 # don't need to do anything in __init__, so we just exit here
432 432 if isinstance(changeid, basectx):
433 433 return
434 434
435 435 if changeid == '':
436 436 changeid = '.'
437 437 self._repo = repo
438 438
439 439 try:
440 440 if isinstance(changeid, int):
441 441 self._node = repo.changelog.node(changeid)
442 442 self._rev = changeid
443 443 return
444 444 if not pycompat.ispy3 and isinstance(changeid, long):
445 445 changeid = str(changeid)
446 446 if changeid == 'null':
447 447 self._node = nullid
448 448 self._rev = nullrev
449 449 return
450 450 if changeid == 'tip':
451 451 self._node = repo.changelog.tip()
452 452 self._rev = repo.changelog.rev(self._node)
453 453 return
454 454 if changeid == '.' or changeid == repo.dirstate.p1():
455 455 # this is a hack to delay/avoid loading obsmarkers
456 456 # when we know that '.' won't be hidden
457 457 self._node = repo.dirstate.p1()
458 458 self._rev = repo.unfiltered().changelog.rev(self._node)
459 459 return
460 460 if len(changeid) == 20:
461 461 try:
462 462 self._node = changeid
463 463 self._rev = repo.changelog.rev(changeid)
464 464 return
465 465 except error.FilteredRepoLookupError:
466 466 raise
467 467 except LookupError:
468 468 pass
469 469
470 470 try:
471 471 r = int(changeid)
472 472 if '%d' % r != changeid:
473 473 raise ValueError
474 474 l = len(repo.changelog)
475 475 if r < 0:
476 476 r += l
477 477 if r < 0 or r >= l:
478 478 raise ValueError
479 479 self._rev = r
480 480 self._node = repo.changelog.node(r)
481 481 return
482 482 except error.FilteredIndexError:
483 483 raise
484 484 except (ValueError, OverflowError, IndexError):
485 485 pass
486 486
487 487 if len(changeid) == 40:
488 488 try:
489 489 self._node = bin(changeid)
490 490 self._rev = repo.changelog.rev(self._node)
491 491 return
492 492 except error.FilteredLookupError:
493 493 raise
494 494 except (TypeError, LookupError):
495 495 pass
496 496
497 497 # lookup bookmarks through the name interface
498 498 try:
499 499 self._node = repo.names.singlenode(repo, changeid)
500 500 self._rev = repo.changelog.rev(self._node)
501 501 return
502 502 except KeyError:
503 503 pass
504 504 except error.FilteredRepoLookupError:
505 505 raise
506 506 except error.RepoLookupError:
507 507 pass
508 508
509 509 self._node = repo.unfiltered().changelog._partialmatch(changeid)
510 510 if self._node is not None:
511 511 self._rev = repo.changelog.rev(self._node)
512 512 return
513 513
514 514 # lookup failed
515 515 # check if it might have come from damaged dirstate
516 516 #
517 517 # XXX we could avoid the unfiltered if we had a recognizable
518 518 # exception for filtered changeset access
519 519 if changeid in repo.unfiltered().dirstate.parents():
520 520 msg = _("working directory has unknown parent '%s'!")
521 521 raise error.Abort(msg % short(changeid))
522 522 try:
523 523 if len(changeid) == 20 and nonascii(changeid):
524 524 changeid = hex(changeid)
525 525 except TypeError:
526 526 pass
527 527 except (error.FilteredIndexError, error.FilteredLookupError,
528 528 error.FilteredRepoLookupError):
529 529 raise _filterederror(repo, changeid)
530 530 except IndexError:
531 531 pass
532 532 raise error.RepoLookupError(
533 533 _("unknown revision '%s'") % changeid)
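
The lookup cascade above means many changeid spellings are accepted; each of the following yields a changectx (editorial sketch, assuming `repo` is an open localrepository and the revisions exist):

    repo[0]             # integer revision number
    repo['tip']         # the special 'tip' name
    repo['.']           # first parent of the working directory
    repo['null']        # the null revision
    repo['07da778f']    # unambiguous hex prefix, resolved via _partialmatch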
534 534
535 535 def __hash__(self):
536 536 try:
537 537 return hash(self._rev)
538 538 except AttributeError:
539 539 return id(self)
540 540
541 541 def __nonzero__(self):
542 542 return self._rev != nullrev
543 543
544 544 __bool__ = __nonzero__
545 545
546 546 @propertycache
547 547 def _changeset(self):
548 548 return self._repo.changelog.changelogrevision(self.rev())
549 549
550 550 @propertycache
551 551 def _manifest(self):
552 552 return self._manifestctx.read()
553 553
554 554 @propertycache
555 555 def _manifestctx(self):
556 556 return self._repo.manifestlog[self._changeset.manifest]
557 557
558 558 @propertycache
559 559 def _manifestdelta(self):
560 560 return self._manifestctx.readdelta()
561 561
562 562 @propertycache
563 563 def _parents(self):
564 564 repo = self._repo
565 565 p1, p2 = repo.changelog.parentrevs(self._rev)
566 566 if p2 == nullrev:
567 567 return [changectx(repo, p1)]
568 568 return [changectx(repo, p1), changectx(repo, p2)]
569 569
570 570 def changeset(self):
571 571 c = self._changeset
572 572 return (
573 573 c.manifest,
574 574 c.user,
575 575 c.date,
576 576 c.files,
577 577 c.description,
578 578 c.extra,
579 579 )
580 580 def manifestnode(self):
581 581 return self._changeset.manifest
582 582
583 583 def user(self):
584 584 return self._changeset.user
585 585 def date(self):
586 586 return self._changeset.date
587 587 def files(self):
588 588 return self._changeset.files
589 589 def description(self):
590 590 return self._changeset.description
591 591 def branch(self):
592 592 return encoding.tolocal(self._changeset.extra.get("branch"))
593 593 def closesbranch(self):
594 594 return 'close' in self._changeset.extra
595 595 def extra(self):
596 596 return self._changeset.extra
597 597 def tags(self):
598 598 return self._repo.nodetags(self._node)
599 599 def bookmarks(self):
600 600 return self._repo.nodebookmarks(self._node)
601 601 def phase(self):
602 602 return self._repo._phasecache.phase(self._repo, self._rev)
603 603 def hidden(self):
604 604 return self._rev in repoview.filterrevs(self._repo, 'visible')
605 605
606 606 def children(self):
607 607 """return contexts for each child changeset"""
608 608 c = self._repo.changelog.children(self._node)
609 609 return [changectx(self._repo, x) for x in c]
610 610
611 611 def ancestors(self):
612 612 for a in self._repo.changelog.ancestors([self._rev]):
613 613 yield changectx(self._repo, a)
614 614
615 615 def descendants(self):
616 616 for d in self._repo.changelog.descendants([self._rev]):
617 617 yield changectx(self._repo, d)
618 618
619 619 def filectx(self, path, fileid=None, filelog=None):
620 620 """get a file context from this changeset"""
621 621 if fileid is None:
622 622 fileid = self.filenode(path)
623 623 return filectx(self._repo, path, fileid=fileid,
624 624 changectx=self, filelog=filelog)
625 625
626 626 def ancestor(self, c2, warn=False):
627 627 """return the "best" ancestor context of self and c2
628 628
629 629 If there are multiple candidates, it will show a message and check
630 630 merge.preferancestor configuration before falling back to the
631 631 revlog ancestor."""
632 632 # deal with workingctxs
633 633 n2 = c2._node
634 634 if n2 is None:
635 635 n2 = c2._parents[0]._node
636 636 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
637 637 if not cahs:
638 638 anc = nullid
639 639 elif len(cahs) == 1:
640 640 anc = cahs[0]
641 641 else:
642 642 # experimental config: merge.preferancestor
643 643 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
644 644 try:
645 645 ctx = changectx(self._repo, r)
646 646 except error.RepoLookupError:
647 647 continue
648 648 anc = ctx.node()
649 649 if anc in cahs:
650 650 break
651 651 else:
652 652 anc = self._repo.changelog.ancestor(self._node, n2)
653 653 if warn:
654 654 self._repo.ui.status(
655 655 (_("note: using %s as ancestor of %s and %s\n") %
656 656 (short(anc), short(self._node), short(n2))) +
657 657 ''.join(_(" alternatively, use --config "
658 658 "merge.preferancestor=%s\n") %
659 659 short(n) for n in sorted(cahs) if n != anc))
660 660 return changectx(self._repo, anc)
661 661
662 662 def descendant(self, other):
663 663 """True if other is descendant of this changeset"""
664 664 return self._repo.changelog.descendant(self._rev, other._rev)
665 665
666 666 def walk(self, match):
667 667 '''Generates matching file names.'''
668 668
669 669 # Wrap match.bad method to have message with nodeid
670 670 def bad(fn, msg):
671 671 # The manifest doesn't know about subrepos, so don't complain about
672 672 # paths into valid subrepos.
673 673 if any(fn == s or fn.startswith(s + '/')
674 674 for s in self.substate):
675 675 return
676 676 match.bad(fn, _('no such file in rev %s') % self)
677 677
678 678 m = matchmod.badmatch(match, bad)
679 679 return self._manifest.walk(m)
680 680
681 681 def matches(self, match):
682 682 return self.walk(match)
683 683
684 684 class basefilectx(object):
685 685 """A filecontext object represents the common logic for its children:
686 686 filectx: read-only access to a filerevision that is already present
687 687 in the repo,
688 688 workingfilectx: a filecontext that represents files from the working
689 689 directory,
690 memfilectx: a filecontext that represents files in-memory."""
690 memfilectx: a filecontext that represents files in-memory,
691 overlayfilectx: duplicate another filecontext with some fields overridden.
692 """
691 693 @propertycache
692 694 def _filelog(self):
693 695 return self._repo.file(self._path)
694 696
695 697 @propertycache
696 698 def _changeid(self):
697 699 if r'_changeid' in self.__dict__:
698 700 return self._changeid
699 701 elif r'_changectx' in self.__dict__:
700 702 return self._changectx.rev()
701 703 elif r'_descendantrev' in self.__dict__:
702 704 # this file context was created from a revision with a known
703 705 # descendant, we can (lazily) correct for linkrev aliases
704 706 return self._adjustlinkrev(self._descendantrev)
705 707 else:
706 708 return self._filelog.linkrev(self._filerev)
707 709
708 710 @propertycache
709 711 def _filenode(self):
710 712 if r'_fileid' in self.__dict__:
711 713 return self._filelog.lookup(self._fileid)
712 714 else:
713 715 return self._changectx.filenode(self._path)
714 716
715 717 @propertycache
716 718 def _filerev(self):
717 719 return self._filelog.rev(self._filenode)
718 720
719 721 @propertycache
720 722 def _repopath(self):
721 723 return self._path
722 724
723 725 def __nonzero__(self):
724 726 try:
725 727 self._filenode
726 728 return True
727 729 except error.LookupError:
728 730 # file is missing
729 731 return False
730 732
731 733 __bool__ = __nonzero__
732 734
733 735 def __str__(self):
734 736 try:
735 737 return "%s@%s" % (self.path(), self._changectx)
736 738 except error.LookupError:
737 739 return "%s@???" % self.path()
738 740
739 741 def __repr__(self):
740 742 return "<%s %s>" % (type(self).__name__, str(self))
741 743
742 744 def __hash__(self):
743 745 try:
744 746 return hash((self._path, self._filenode))
745 747 except AttributeError:
746 748 return id(self)
747 749
748 750 def __eq__(self, other):
749 751 try:
750 752 return (type(self) == type(other) and self._path == other._path
751 753 and self._filenode == other._filenode)
752 754 except AttributeError:
753 755 return False
754 756
755 757 def __ne__(self, other):
756 758 return not (self == other)
757 759
758 760 def filerev(self):
759 761 return self._filerev
760 762 def filenode(self):
761 763 return self._filenode
762 764 @propertycache
763 765 def _flags(self):
764 766 return self._changectx.flags(self._path)
765 767 def flags(self):
766 768 return self._flags
767 769 def filelog(self):
768 770 return self._filelog
769 771 def rev(self):
770 772 return self._changeid
771 773 def linkrev(self):
772 774 return self._filelog.linkrev(self._filerev)
773 775 def node(self):
774 776 return self._changectx.node()
775 777 def hex(self):
776 778 return self._changectx.hex()
777 779 def user(self):
778 780 return self._changectx.user()
779 781 def date(self):
780 782 return self._changectx.date()
781 783 def files(self):
782 784 return self._changectx.files()
783 785 def description(self):
784 786 return self._changectx.description()
785 787 def branch(self):
786 788 return self._changectx.branch()
787 789 def extra(self):
788 790 return self._changectx.extra()
789 791 def phase(self):
790 792 return self._changectx.phase()
791 793 def phasestr(self):
792 794 return self._changectx.phasestr()
793 795 def manifest(self):
794 796 return self._changectx.manifest()
795 797 def changectx(self):
796 798 return self._changectx
797 799 def renamed(self):
798 800 return self._copied
799 801 def repo(self):
800 802 return self._repo
801 803 def size(self):
802 804 return len(self.data())
803 805
804 806 def path(self):
805 807 return self._path
806 808
807 809 def isbinary(self):
808 810 try:
809 811 return util.binary(self.data())
810 812 except IOError:
811 813 return False
812 814 def isexec(self):
813 815 return 'x' in self.flags()
814 816 def islink(self):
815 817 return 'l' in self.flags()
816 818
817 819 def isabsent(self):
818 820 """whether this filectx represents a file not in self._changectx
819 821
820 822 This is mainly for merge code to detect change/delete conflicts. This is
821 823 expected to be True for all subclasses of basectx."""
822 824 return False
823 825
824 826 _customcmp = False
825 827 def cmp(self, fctx):
826 828 """compare with other file context
827 829
828 830 returns True if different than fctx.
829 831 """
830 832 if fctx._customcmp:
831 833 return fctx.cmp(self)
832 834
833 835 if (fctx._filenode is None
834 836 and (self._repo._encodefilterpats
835 837 # if file data starts with '\1\n', empty metadata block is
836 838 # prepended, which adds 4 bytes to filelog.size().
837 839 or self.size() - 4 == fctx.size())
838 840 or self.size() == fctx.size()):
839 841 return self._filelog.cmp(self._filenode, fctx.data())
840 842
841 843 return True
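
From a caller's point of view the comparison reads as follows (editorial sketch; the path is hypothetical and `repo` is assumed to be an open localrepository):

    old = repo['tip'].p1()['README']
    new = repo['tip']['README']
    if new.cmp(old):
        pass    # file contents differ between the two revisions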
842 844
843 845 def _adjustlinkrev(self, srcrev, inclusive=False):
844 846 """return the first ancestor of <srcrev> introducing <fnode>
845 847
846 848 If the linkrev of the file revision does not point to an ancestor of
847 849 srcrev, we'll walk down the ancestors until we find one introducing
848 850 this file revision.
849 851
850 852 :srcrev: the changeset revision we search ancestors from
851 853 :inclusive: if true, the src revision will also be checked
852 854 """
853 855 repo = self._repo
854 856 cl = repo.unfiltered().changelog
855 857 mfl = repo.manifestlog
856 858 # fetch the linkrev
857 859 lkr = self.linkrev()
858 860 # hack to reuse ancestor computation when searching for renames
859 861 memberanc = getattr(self, '_ancestrycontext', None)
860 862 iteranc = None
861 863 if srcrev is None:
862 864 # wctx case, used by workingfilectx during mergecopy
863 865 revs = [p.rev() for p in self._repo[None].parents()]
864 866 inclusive = True # we skipped the real (revless) source
865 867 else:
866 868 revs = [srcrev]
867 869 if memberanc is None:
868 870 memberanc = iteranc = cl.ancestors(revs, lkr,
869 871 inclusive=inclusive)
870 872 # check if this linkrev is an ancestor of srcrev
871 873 if lkr not in memberanc:
872 874 if iteranc is None:
873 875 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
874 876 fnode = self._filenode
875 877 path = self._path
876 878 for a in iteranc:
877 879 ac = cl.read(a) # get changeset data (we avoid object creation)
878 880 if path in ac[3]: # checking the 'files' field.
879 881 # The file has been touched, check if the content is
880 882 # similar to the one we search for.
881 883 if fnode == mfl[ac[0]].readfast().get(path):
882 884 return a
883 885 # In theory, we should never get out of that loop without a result.
884 886 # But if the manifest uses a buggy file revision (not a child of the
885 887 # one it replaces) we could. Such a buggy situation will likely
886 888 # result in a crash somewhere else at some point.
887 889 return lkr
888 890
889 891 def introrev(self):
890 892 """return the rev of the changeset which introduced this file revision
891 893
892 894 This method is different from linkrev because it takes into account the
893 895 changeset the filectx was created from. It ensures the returned
894 896 revision is one of its ancestors. This prevents bugs from
895 897 'linkrev-shadowing' when a file revision is used by multiple
896 898 changesets.
897 899 """
898 900 lkr = self.linkrev()
899 901 attrs = vars(self)
900 902 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
901 903 if noctx or self.rev() == lkr:
902 904 return self.linkrev()
903 905 return self._adjustlinkrev(self.rev(), inclusive=True)
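
The distinction matters when a file revision is reused by several changesets (linkrev shadowing). A quick sketch (the path is hypothetical; `repo` is assumed to be an open localrepository):

    fctx = repo['tip']['README']
    lkr = fctx.linkrev()      # raw filelog linkrev, which may be shadowed
    intro = fctx.introrev()   # always an ancestor of the changeset fctx was read from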
904 906
905 907 def _parentfilectx(self, path, fileid, filelog):
906 908 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
907 909 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
908 910 if '_changeid' in vars(self) or '_changectx' in vars(self):
909 911 # If self is associated with a changeset (probably explicitly
910 912 # fed), ensure the created filectx is associated with a
911 913 # changeset that is an ancestor of self.changectx.
912 914 # This lets us later use _adjustlinkrev to get a correct link.
913 915 fctx._descendantrev = self.rev()
914 916 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
915 917 elif '_descendantrev' in vars(self):
916 918 # Otherwise propagate _descendantrev if we have one associated.
917 919 fctx._descendantrev = self._descendantrev
918 920 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
919 921 return fctx
920 922
921 923 def parents(self):
922 924 _path = self._path
923 925 fl = self._filelog
924 926 parents = self._filelog.parents(self._filenode)
925 927 pl = [(_path, node, fl) for node in parents if node != nullid]
926 928
927 929 r = fl.renamed(self._filenode)
928 930 if r:
929 931 # - In the simple rename case, both parents are nullid, pl is empty.
930 932 # - In case of merge, only one of the parents is nullid and should
931 933 # be replaced with the rename information. This parent is -always-
932 934 # the first one.
933 935 #
934 936 # As nullid parents have always been filtered out in the previous list
935 937 # comprehension, inserting at 0 will always result in replacing the
936 938 # first nullid parent with the rename information.
937 939 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
938 940
939 941 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
940 942
941 943 def p1(self):
942 944 return self.parents()[0]
943 945
944 946 def p2(self):
945 947 p = self.parents()
946 948 if len(p) == 2:
947 949 return p[1]
948 950 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
949 951
950 952 def annotate(self, follow=False, linenumber=False, diffopts=None):
951 953 '''returns a list of tuples of ((ctx, number), line) for each line
952 954 in the file, where ctx is the filectx of the node where
953 955 that line was last changed; if linenumber parameter is true, number is
954 956 the line number at the first appearance in the managed file, otherwise,
955 957 number has a fixed value of False.
956 958 '''
957 959
958 960 def lines(text):
959 961 if text.endswith("\n"):
960 962 return text.count("\n")
961 963 return text.count("\n") + int(bool(text))
962 964
963 965 if linenumber:
964 966 def decorate(text, rev):
965 967 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
966 968 else:
967 969 def decorate(text, rev):
968 970 return ([(rev, False)] * lines(text), text)
969 971
970 972 def pair(parent, child):
971 973 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
972 974 for (a1, a2, b1, b2), t in blocks:
973 975 # Changed blocks ('!') or blocks made only of blank lines ('~')
974 976 # belong to the child.
975 977 if t == '=':
976 978 child[0][b1:b2] = parent[0][a1:a2]
977 979 return child
978 980
979 981 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
980 982
981 983 def parents(f):
982 984 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
983 985 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
984 986 # from the topmost introrev (= srcrev) down to p.linkrev() if it
985 987 # isn't an ancestor of the srcrev.
986 988 f._changeid
987 989 pl = f.parents()
988 990
989 991 # Don't return renamed parents if we aren't following.
990 992 if not follow:
991 993 pl = [p for p in pl if p.path() == f.path()]
992 994
993 995 # renamed filectx won't have a filelog yet, so set it
994 996 # from the cache to save time
995 997 for p in pl:
996 998 if not '_filelog' in p.__dict__:
997 999 p._filelog = getlog(p.path())
998 1000
999 1001 return pl
1000 1002
1001 1003 # use linkrev to find the first changeset where self appeared
1002 1004 base = self
1003 1005 introrev = self.introrev()
1004 1006 if self.rev() != introrev:
1005 1007 base = self.filectx(self.filenode(), changeid=introrev)
1006 1008 if getattr(base, '_ancestrycontext', None) is None:
1007 1009 cl = self._repo.changelog
1008 1010 if introrev is None:
1009 1011 # wctx is not inclusive, but works because _ancestrycontext
1010 1012 # is used to test filelog revisions
1011 1013 ac = cl.ancestors([p.rev() for p in base.parents()],
1012 1014 inclusive=True)
1013 1015 else:
1014 1016 ac = cl.ancestors([introrev], inclusive=True)
1015 1017 base._ancestrycontext = ac
1016 1018
1017 1019 # This algorithm would prefer to be recursive, but Python is a
1018 1020 # bit recursion-hostile. Instead we do an iterative
1019 1021 # depth-first search.
1020 1022
1021 1023 # 1st DFS pre-calculates pcache and needed
1022 1024 visit = [base]
1023 1025 pcache = {}
1024 1026 needed = {base: 1}
1025 1027 while visit:
1026 1028 f = visit.pop()
1027 1029 if f in pcache:
1028 1030 continue
1029 1031 pl = parents(f)
1030 1032 pcache[f] = pl
1031 1033 for p in pl:
1032 1034 needed[p] = needed.get(p, 0) + 1
1033 1035 if p not in pcache:
1034 1036 visit.append(p)
1035 1037
1036 1038 # 2nd DFS does the actual annotate
1037 1039 visit[:] = [base]
1038 1040 hist = {}
1039 1041 while visit:
1040 1042 f = visit[-1]
1041 1043 if f in hist:
1042 1044 visit.pop()
1043 1045 continue
1044 1046
1045 1047 ready = True
1046 1048 pl = pcache[f]
1047 1049 for p in pl:
1048 1050 if p not in hist:
1049 1051 ready = False
1050 1052 visit.append(p)
1051 1053 if ready:
1052 1054 visit.pop()
1053 1055 curr = decorate(f.data(), f)
1054 1056 for p in pl:
1055 1057 curr = pair(hist[p], curr)
1056 1058 if needed[p] == 1:
1057 1059 del hist[p]
1058 1060 del needed[p]
1059 1061 else:
1060 1062 needed[p] -= 1
1061 1063
1062 1064 hist[f] = curr
1063 1065 del pcache[f]
1064 1066
1065 1067 return zip(hist[base][0], hist[base][1].splitlines(True))
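
A sketch of consuming the annotate output described above (the path is hypothetical; `repo` is assumed to be an open localrepository):

    fctx = repo['tip']['README']
    for (actx, lineno), line in fctx.annotate(follow=True, linenumber=True):
        blamed = actx.rev()   # changeset that last changed this line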
1066 1068
1067 1069 def ancestors(self, followfirst=False):
1068 1070 visit = {}
1069 1071 c = self
1070 1072 if followfirst:
1071 1073 cut = 1
1072 1074 else:
1073 1075 cut = None
1074 1076
1075 1077 while True:
1076 1078 for parent in c.parents()[:cut]:
1077 1079 visit[(parent.linkrev(), parent.filenode())] = parent
1078 1080 if not visit:
1079 1081 break
1080 1082 c = visit.pop(max(visit))
1081 1083 yield c
1082 1084
1083 1085 class filectx(basefilectx):
1084 1086 """A filecontext object makes access to data related to a particular
1085 1087 filerevision convenient."""
1086 1088 def __init__(self, repo, path, changeid=None, fileid=None,
1087 1089 filelog=None, changectx=None):
1088 1090 """changeid can be a changeset revision, node, or tag.
1089 1091 fileid can be a file revision or node."""
1090 1092 self._repo = repo
1091 1093 self._path = path
1092 1094
1093 1095 assert (changeid is not None
1094 1096 or fileid is not None
1095 1097 or changectx is not None), \
1096 1098 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1097 1099 % (changeid, fileid, changectx))
1098 1100
1099 1101 if filelog is not None:
1100 1102 self._filelog = filelog
1101 1103
1102 1104 if changeid is not None:
1103 1105 self._changeid = changeid
1104 1106 if changectx is not None:
1105 1107 self._changectx = changectx
1106 1108 if fileid is not None:
1107 1109 self._fileid = fileid
1108 1110
1109 1111 @propertycache
1110 1112 def _changectx(self):
1111 1113 try:
1112 1114 return changectx(self._repo, self._changeid)
1113 1115 except error.FilteredRepoLookupError:
1114 1116 # Linkrev may point to any revision in the repository. When the
1115 1117 # repository is filtered this may lead to `filectx` trying to build
1116 1118 # `changectx` for a filtered revision. In such a case we fall back to
1117 1119 # creating `changectx` on the unfiltered version of the repository.
1118 1120 # This fallback should not be an issue because `changectx` from
1119 1121 # `filectx` are not used in complex operations that care about
1120 1122 # filtering.
1121 1123 #
1122 1124 # This fallback is a cheap and dirty fix that prevents several
1123 1125 # crashes. It does not ensure the behavior is correct. However the
1124 1126 # behavior was not correct before filtering either, and "incorrect
1125 1127 # behavior" is seen as better than a crash.
1126 1128 #
1127 1129 # Linkrevs have several serious troubles with filtering that are
1128 1130 # complicated to solve. Proper handling of the issue here should be
1129 1131 # considered when solutions to the linkrev issue are on the table.
1130 1132 return changectx(self._repo.unfiltered(), self._changeid)
1131 1133
1132 1134 def filectx(self, fileid, changeid=None):
1133 1135 '''opens an arbitrary revision of the file without
1134 1136 opening a new filelog'''
1135 1137 return filectx(self._repo, self._path, fileid=fileid,
1136 1138 filelog=self._filelog, changeid=changeid)
1137 1139
1138 1140 def rawdata(self):
1139 1141 return self._filelog.revision(self._filenode, raw=True)
1140 1142
1141 1143 def rawflags(self):
1142 1144 """low-level revlog flags"""
1143 1145 return self._filelog.flags(self._filerev)
1144 1146
1145 1147 def data(self):
1146 1148 try:
1147 1149 return self._filelog.read(self._filenode)
1148 1150 except error.CensoredNodeError:
1149 1151 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1150 1152 return ""
1151 1153 raise error.Abort(_("censored node: %s") % short(self._filenode),
1152 1154 hint=_("set censor.policy to ignore errors"))
1153 1155
1154 1156 def size(self):
1155 1157 return self._filelog.size(self._filerev)
1156 1158
1157 1159 @propertycache
1158 1160 def _copied(self):
1159 1161 """check if file was actually renamed in this changeset revision
1160 1162
1161 1163 If a rename is logged in the file revision, we report a copy for the changeset only
1162 1164 if the file revision's linkrev points back to the changeset in question
1163 1165 or both changeset parents contain different file revisions.
1164 1166 """
1165 1167
1166 1168 renamed = self._filelog.renamed(self._filenode)
1167 1169 if not renamed:
1168 1170 return renamed
1169 1171
1170 1172 if self.rev() == self.linkrev():
1171 1173 return renamed
1172 1174
1173 1175 name = self.path()
1174 1176 fnode = self._filenode
1175 1177 for p in self._changectx.parents():
1176 1178 try:
1177 1179 if fnode == p.filenode(name):
1178 1180 return None
1179 1181 except error.LookupError:
1180 1182 pass
1181 1183 return renamed
1182 1184
1183 1185 def children(self):
1184 1186 # hard for renames
1185 1187 c = self._filelog.children(self._filenode)
1186 1188 return [filectx(self._repo, self._path, fileid=x,
1187 1189 filelog=self._filelog) for x in c]
1188 1190
1189 1191 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1190 1192 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1191 1193 if diff from fctx2 to fctx1 has changes in linerange2 and
1192 1194 `linerange1` is the new line range for fctx1.
1193 1195 """
1194 1196 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1195 1197 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1196 1198 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1197 1199 return diffinrange, linerange1
1198 1200
1199 1201 def blockancestors(fctx, fromline, toline, followfirst=False):
1200 1202 """Yield ancestors of `fctx` with respect to the block of lines within
1201 1203 `fromline`-`toline` range.
1202 1204 """
1203 1205 diffopts = patch.diffopts(fctx._repo.ui)
1204 1206 introrev = fctx.introrev()
1205 1207 if fctx.rev() != introrev:
1206 1208 fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
1207 1209 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1208 1210 while visit:
1209 1211 c, linerange2 = visit.pop(max(visit))
1210 1212 pl = c.parents()
1211 1213 if followfirst:
1212 1214 pl = pl[:1]
1213 1215 if not pl:
1214 1216 # The block originates from the initial revision.
1215 1217 yield c, linerange2
1216 1218 continue
1217 1219 inrange = False
1218 1220 for p in pl:
1219 1221 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1220 1222 inrange = inrange or inrangep
1221 1223 if linerange1[0] == linerange1[1]:
1222 1224 # Parent's linerange is empty, meaning that the block got
1223 1225 # introduced in this revision; no need to go further in this
1224 1226 # branch.
1225 1227 continue
1226 1228 # Set _descendantrev with 'c' (a known descendant) so that, when
1227 1229 # _adjustlinkrev is called for 'p', it receives this descendant
1228 1230 # (as srcrev) instead of the possibly topmost introrev.
1229 1231 p._descendantrev = c.rev()
1230 1232 visit[p.linkrev(), p.filenode()] = p, linerange1
1231 1233 if inrange:
1232 1234 yield c, linerange2
1233 1235
1234 1236 def blockdescendants(fctx, fromline, toline):
1235 1237 """Yield descendants of `fctx` with respect to the block of lines within
1236 1238 `fromline`-`toline` range.
1237 1239 """
1238 1240 # First possibly yield 'fctx' if it has changes in range with respect to
1239 1241 # its parents.
1240 1242 try:
1241 1243 c, linerange1 = next(blockancestors(fctx, fromline, toline))
1242 1244 except StopIteration:
1243 1245 pass
1244 1246 else:
1245 1247 if c == fctx:
1246 1248 yield c, linerange1
1247 1249
1248 1250 diffopts = patch.diffopts(fctx._repo.ui)
1249 1251 fl = fctx.filelog()
1250 1252 seen = {fctx.filerev(): (fctx, (fromline, toline))}
1251 1253 for i in fl.descendants([fctx.filerev()]):
1252 1254 c = fctx.filectx(i)
1253 1255 inrange = False
1254 1256 for x in fl.parentrevs(i):
1255 1257 try:
1256 1258 p, linerange2 = seen[x]
1257 1259 except KeyError:
1258 1260 # nullrev or other branch
1259 1261 continue
1260 1262 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
1261 1263 inrange = inrange or inrangep
1262 1264 # If revision 'i' has been seen (it's a merge), we assume that its
1263 1265 # line range is the same independently of which parent was used
1264 1266 # to compute it.
1265 1267 assert i not in seen or seen[i][1] == linerange1, (
1266 1268 'computed line range for %s is not consistent between '
1267 1269 'ancestor branches' % c)
1268 1270 seen[i] = c, linerange1
1269 1271 if inrange:
1270 1272 yield c, linerange1
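
Both generators are driven the same way; a sketch for blockancestors (line numbers and path are hypothetical; `repo` is assumed to be an open localrepository):

    from mercurial import context
    fctx = repo['tip']['README']
    for c, (fromline, toline) in context.blockancestors(fctx, 10, 20):
        pass    # c is an ancestor filectx relevant to the tracked line block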
1271 1273
1272 1274 class committablectx(basectx):
1273 1275 """A committablectx object provides common functionality for a context that
1274 1276 wants the ability to commit, e.g. workingctx or memctx."""
1275 1277 def __init__(self, repo, text="", user=None, date=None, extra=None,
1276 1278 changes=None):
1277 1279 self._repo = repo
1278 1280 self._rev = None
1279 1281 self._node = None
1280 1282 self._text = text
1281 1283 if date:
1282 1284 self._date = util.parsedate(date)
1283 1285 if user:
1284 1286 self._user = user
1285 1287 if changes:
1286 1288 self._status = changes
1287 1289
1288 1290 self._extra = {}
1289 1291 if extra:
1290 1292 self._extra = extra.copy()
1291 1293 if 'branch' not in self._extra:
1292 1294 try:
1293 1295 branch = encoding.fromlocal(self._repo.dirstate.branch())
1294 1296 except UnicodeDecodeError:
1295 1297 raise error.Abort(_('branch name not in UTF-8!'))
1296 1298 self._extra['branch'] = branch
1297 1299 if self._extra['branch'] == '':
1298 1300 self._extra['branch'] = 'default'
1299 1301
1300 1302 def __str__(self):
1301 1303 return str(self._parents[0]) + "+"
1302 1304
1303 1305 def __nonzero__(self):
1304 1306 return True
1305 1307
1306 1308 __bool__ = __nonzero__
1307 1309
1308 1310 def _buildflagfunc(self):
1309 1311 # Create a fallback function for getting file flags when the
1310 1312 # filesystem doesn't support them
1311 1313
1312 1314 copiesget = self._repo.dirstate.copies().get
1313 1315 parents = self.parents()
1314 1316 if len(parents) < 2:
1315 1317 # when we have one parent, it's easy: copy from parent
1316 1318 man = parents[0].manifest()
1317 1319 def func(f):
1318 1320 f = copiesget(f, f)
1319 1321 return man.flags(f)
1320 1322 else:
1321 1323 # merges are tricky: we try to reconstruct the unstored
1322 1324 # result from the merge (issue1802)
1323 1325 p1, p2 = parents
1324 1326 pa = p1.ancestor(p2)
1325 1327 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1326 1328
1327 1329 def func(f):
1328 1330 f = copiesget(f, f) # may be wrong for merges with copies
1329 1331 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1330 1332 if fl1 == fl2:
1331 1333 return fl1
1332 1334 if fl1 == fla:
1333 1335 return fl2
1334 1336 if fl2 == fla:
1335 1337 return fl1
1336 1338 return '' # punt for conflicts
1337 1339
1338 1340 return func
1339 1341
1340 1342 @propertycache
1341 1343 def _flagfunc(self):
1342 1344 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1343 1345
1344 1346 @propertycache
1345 1347 def _status(self):
1346 1348 return self._repo.status()
1347 1349
1348 1350 @propertycache
1349 1351 def _user(self):
1350 1352 return self._repo.ui.username()
1351 1353
1352 1354 @propertycache
1353 1355 def _date(self):
1354 1356 return util.makedate()
1355 1357
1356 1358 def subrev(self, subpath):
1357 1359 return None
1358 1360
1359 1361 def manifestnode(self):
1360 1362 return None
1361 1363 def user(self):
1362 1364 return self._user or self._repo.ui.username()
1363 1365 def date(self):
1364 1366 return self._date
1365 1367 def description(self):
1366 1368 return self._text
1367 1369 def files(self):
1368 1370 return sorted(self._status.modified + self._status.added +
1369 1371 self._status.removed)
1370 1372
1371 1373 def modified(self):
1372 1374 return self._status.modified
1373 1375 def added(self):
1374 1376 return self._status.added
1375 1377 def removed(self):
1376 1378 return self._status.removed
1377 1379 def deleted(self):
1378 1380 return self._status.deleted
1379 1381 def branch(self):
1380 1382 return encoding.tolocal(self._extra['branch'])
1381 1383 def closesbranch(self):
1382 1384 return 'close' in self._extra
1383 1385 def extra(self):
1384 1386 return self._extra
1385 1387
1386 1388 def tags(self):
1387 1389 return []
1388 1390
1389 1391 def bookmarks(self):
1390 1392 b = []
1391 1393 for p in self.parents():
1392 1394 b.extend(p.bookmarks())
1393 1395 return b
1394 1396
1395 1397 def phase(self):
1396 1398 phase = phases.draft # default phase to draft
1397 1399 for p in self.parents():
1398 1400 phase = max(phase, p.phase())
1399 1401 return phase
1400 1402
1401 1403 def hidden(self):
1402 1404 return False
1403 1405
1404 1406 def children(self):
1405 1407 return []
1406 1408
1407 1409 def flags(self, path):
1408 1410 if r'_manifest' in self.__dict__:
1409 1411 try:
1410 1412 return self._manifest.flags(path)
1411 1413 except KeyError:
1412 1414 return ''
1413 1415
1414 1416 try:
1415 1417 return self._flagfunc(path)
1416 1418 except OSError:
1417 1419 return ''
1418 1420
1419 1421 def ancestor(self, c2):
1420 1422 """return the "best" ancestor context of self and c2"""
1421 1423 return self._parents[0].ancestor(c2) # punt on two parents for now
1422 1424
1423 1425 def walk(self, match):
1424 1426 '''Generates matching file names.'''
1425 1427 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1426 1428 True, False))
1427 1429
1428 1430 def matches(self, match):
1429 1431 return sorted(self._repo.dirstate.matches(match))
1430 1432
1431 1433 def ancestors(self):
1432 1434 for p in self._parents:
1433 1435 yield p
1434 1436 for a in self._repo.changelog.ancestors(
1435 1437 [p.rev() for p in self._parents]):
1436 1438 yield changectx(self._repo, a)
1437 1439
1438 1440 def markcommitted(self, node):
1439 1441 """Perform post-commit cleanup necessary after committing this ctx
1440 1442
1441 1443 Specifically, this updates backing stores this working context
1442 1444 wraps to reflect the fact that the changes reflected by this
1443 1445 workingctx have been committed. For example, it marks
1444 1446 modified and added files as normal in the dirstate.
1445 1447
1446 1448 """
1447 1449
1448 1450 self._repo.dirstate.beginparentchange()
1449 1451 for f in self.modified() + self.added():
1450 1452 self._repo.dirstate.normal(f)
1451 1453 for f in self.removed():
1452 1454 self._repo.dirstate.drop(f)
1453 1455 self._repo.dirstate.setparents(node)
1454 1456 self._repo.dirstate.endparentchange()
1455 1457
1456 1458 # write changes out explicitly, because nesting wlock at
1457 1459 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1458 1460 # from immediately doing so for subsequent changing files
1459 1461 self._repo.dirstate.write(self._repo.currenttransaction())
1460 1462
1461 1463 class workingctx(committablectx):
1462 1464 """A workingctx object makes access to data related to
1463 1465 the current working directory convenient.
1464 1466 date - any valid date string or (unixtime, offset), or None.
1465 1467 user - username string, or None.
1466 1468 extra - a dictionary of extra values, or None.
1467 1469 changes - a list of file lists as returned by localrepo.status()
1468 1470 or None to use the repository status.
1469 1471 """
1470 1472 def __init__(self, repo, text="", user=None, date=None, extra=None,
1471 1473 changes=None):
1472 1474 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1473 1475
1474 1476 def __iter__(self):
1475 1477 d = self._repo.dirstate
1476 1478 for f in d:
1477 1479 if d[f] != 'r':
1478 1480 yield f
1479 1481
1480 1482 def __contains__(self, key):
1481 1483 return self._repo.dirstate[key] not in "?r"
1482 1484
1483 1485 def hex(self):
1484 1486 return hex(wdirid)
1485 1487
1486 1488 @propertycache
1487 1489 def _parents(self):
1488 1490 p = self._repo.dirstate.parents()
1489 1491 if p[1] == nullid:
1490 1492 p = p[:-1]
1491 1493 return [changectx(self._repo, x) for x in p]
1492 1494
1493 1495 def filectx(self, path, filelog=None):
1494 1496 """get a file context from the working directory"""
1495 1497 return workingfilectx(self._repo, path, workingctx=self,
1496 1498 filelog=filelog)
1497 1499
1498 1500 def dirty(self, missing=False, merge=True, branch=True):
1499 1501 "check whether a working directory is modified"
1500 1502 # check subrepos first
1501 1503 for s in sorted(self.substate):
1502 1504 if self.sub(s).dirty():
1503 1505 return True
1504 1506 # check current working dir
1505 1507 return ((merge and self.p2()) or
1506 1508 (branch and self.branch() != self.p1().branch()) or
1507 1509 self.modified() or self.added() or self.removed() or
1508 1510 (missing and self.deleted()))
1509 1511
1510 1512 def add(self, list, prefix=""):
1511 1513 join = lambda f: os.path.join(prefix, f)
1512 1514 with self._repo.wlock():
1513 1515 ui, ds = self._repo.ui, self._repo.dirstate
1514 1516 rejected = []
1515 1517 lstat = self._repo.wvfs.lstat
1516 1518 for f in list:
1517 1519 scmutil.checkportable(ui, join(f))
1518 1520 try:
1519 1521 st = lstat(f)
1520 1522 except OSError:
1521 1523 ui.warn(_("%s does not exist!\n") % join(f))
1522 1524 rejected.append(f)
1523 1525 continue
1524 1526 if st.st_size > 10000000:
1525 1527 ui.warn(_("%s: up to %d MB of RAM may be required "
1526 1528 "to manage this file\n"
1527 1529 "(use 'hg revert %s' to cancel the "
1528 1530 "pending addition)\n")
1529 1531 % (f, 3 * st.st_size // 1000000, join(f)))
1530 1532 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1531 1533 ui.warn(_("%s not added: only files and symlinks "
1532 1534 "supported currently\n") % join(f))
1533 1535 rejected.append(f)
1534 1536 elif ds[f] in 'amn':
1535 1537 ui.warn(_("%s already tracked!\n") % join(f))
1536 1538 elif ds[f] == 'r':
1537 1539 ds.normallookup(f)
1538 1540 else:
1539 1541 ds.add(f)
1540 1542 return rejected
1541 1543
1542 1544 def forget(self, files, prefix=""):
1543 1545 join = lambda f: os.path.join(prefix, f)
1544 1546 with self._repo.wlock():
1545 1547 rejected = []
1546 1548 for f in files:
1547 1549 if f not in self._repo.dirstate:
1548 1550 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1549 1551 rejected.append(f)
1550 1552 elif self._repo.dirstate[f] != 'a':
1551 1553 self._repo.dirstate.remove(f)
1552 1554 else:
1553 1555 self._repo.dirstate.drop(f)
1554 1556 return rejected
1555 1557
1556 1558 def undelete(self, list):
1557 1559 pctxs = self.parents()
1558 1560 with self._repo.wlock():
1559 1561 for f in list:
1560 1562 if self._repo.dirstate[f] != 'r':
1561 1563 self._repo.ui.warn(_("%s not removed!\n") % f)
1562 1564 else:
1563 1565 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1564 1566 t = fctx.data()
1565 1567 self._repo.wwrite(f, t, fctx.flags())
1566 1568 self._repo.dirstate.normal(f)
1567 1569
1568 1570 def copy(self, source, dest):
1569 1571 try:
1570 1572 st = self._repo.wvfs.lstat(dest)
1571 1573 except OSError as err:
1572 1574 if err.errno != errno.ENOENT:
1573 1575 raise
1574 1576 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1575 1577 return
1576 1578 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1577 1579 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1578 1580 "symbolic link\n") % dest)
1579 1581 else:
1580 1582 with self._repo.wlock():
1581 1583 if self._repo.dirstate[dest] in '?':
1582 1584 self._repo.dirstate.add(dest)
1583 1585 elif self._repo.dirstate[dest] in 'r':
1584 1586 self._repo.dirstate.normallookup(dest)
1585 1587 self._repo.dirstate.copy(source, dest)
1586 1588
1587 1589 def match(self, pats=None, include=None, exclude=None, default='glob',
1588 1590 listsubrepos=False, badfn=None):
1589 1591 if pats is None:
1590 1592 pats = []
1591 1593 r = self._repo
1592 1594
1593 1595 # Only a case insensitive filesystem needs magic to translate user input
1594 1596 # to actual case in the filesystem.
1595 1597 matcherfunc = matchmod.match
1596 1598 if not util.fscasesensitive(r.root):
1597 1599 matcherfunc = matchmod.icasefsmatcher
1598 1600 return matcherfunc(r.root, r.getcwd(), pats,
1599 1601 include, exclude, default,
1600 1602 auditor=r.auditor, ctx=self,
1601 1603 listsubrepos=listsubrepos, badfn=badfn)
1602 1604
1603 1605 def _filtersuspectsymlink(self, files):
1604 1606 if not files or self._repo.dirstate._checklink:
1605 1607 return files
1606 1608
1607 1609 # Symlink placeholders may get non-symlink-like contents
1608 1610 # via user error or dereferencing by NFS or Samba servers,
1609 1611 # so we filter out any placeholders that don't look like a
1610 1612 # symlink
1611 1613 sane = []
1612 1614 for f in files:
1613 1615 if self.flags(f) == 'l':
1614 1616 d = self[f].data()
1615 1617 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1616 1618 self._repo.ui.debug('ignoring suspect symlink placeholder'
1617 1619 ' "%s"\n' % f)
1618 1620 continue
1619 1621 sane.append(f)
1620 1622 return sane
1621 1623
1622 1624 def _checklookup(self, files):
1623 1625 # check for any possibly clean files
1624 1626 if not files:
1625 1627 return [], []
1626 1628
1627 1629 modified = []
1628 1630 fixup = []
1629 1631 pctx = self._parents[0]
1630 1632 # do a full compare of any files that might have changed
1631 1633 for f in sorted(files):
1632 1634 if (f not in pctx or self.flags(f) != pctx.flags(f)
1633 1635 or pctx[f].cmp(self[f])):
1634 1636 modified.append(f)
1635 1637 else:
1636 1638 fixup.append(f)
1637 1639
1638 1640 # update dirstate for files that are actually clean
1639 1641 if fixup:
1640 1642 try:
1641 1643 # updating the dirstate is optional
1642 1644 # so we don't wait on the lock
1643 1645 # wlock can invalidate the dirstate, so cache normal _after_
1644 1646 # taking the lock
1645 1647 with self._repo.wlock(False):
1646 1648 normal = self._repo.dirstate.normal
1647 1649 for f in fixup:
1648 1650 normal(f)
1649 1651 # write changes out explicitly, because nesting
1650 1652 # wlock at runtime may prevent 'wlock.release()'
1651 1653 # after this block from doing so for subsequent
1652 1654 # changing files
1653 1655 self._repo.dirstate.write(self._repo.currenttransaction())
1654 1656 except error.LockError:
1655 1657 pass
1656 1658 return modified, fixup
1657 1659
1658 1660 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1659 1661 unknown=False):
1660 1662 '''Gets the status from the dirstate -- internal use only.'''
1661 1663 listignored, listclean, listunknown = ignored, clean, unknown
1662 1664 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1663 1665 subrepos = []
1664 1666 if '.hgsub' in self:
1665 1667 subrepos = sorted(self.substate)
1666 1668 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1667 1669 listclean, listunknown)
1668 1670
1669 1671 # check for any possibly clean files
1670 1672 if cmp:
1671 1673 modified2, fixup = self._checklookup(cmp)
1672 1674 s.modified.extend(modified2)
1673 1675
1674 1676 # update dirstate for files that are actually clean
1675 1677 if fixup and listclean:
1676 1678 s.clean.extend(fixup)
1677 1679
1678 1680 if match.always():
1679 1681 # cache for performance
1680 1682 if s.unknown or s.ignored or s.clean:
1681 1683 # "_status" is cached with list*=False in the normal route
1682 1684 self._status = scmutil.status(s.modified, s.added, s.removed,
1683 1685 s.deleted, [], [], [])
1684 1686 else:
1685 1687 self._status = s
1686 1688
1687 1689 return s
1688 1690
1689 1691 @propertycache
1690 1692 def _manifest(self):
1691 1693 """generate a manifest corresponding to the values in self._status
1692 1694
1693 1695 This reuses the file nodeids from the parent, but we use special node
1694 1696 identifiers for added and modified files. This is used by manifest
1695 1697 merge to see that files are different and by the update logic to avoid
1696 1698 deleting newly added files.
1697 1699 """
1698 1700 return self._buildstatusmanifest(self._status)
1699 1701
1700 1702 def _buildstatusmanifest(self, status):
1701 1703 """Builds a manifest that includes the given status results."""
1702 1704 parents = self.parents()
1703 1705
1704 1706 man = parents[0].manifest().copy()
1705 1707
1706 1708 ff = self._flagfunc
1707 1709 for i, l in ((addednodeid, status.added),
1708 1710 (modifiednodeid, status.modified)):
1709 1711 for f in l:
1710 1712 man[f] = i
1711 1713 try:
1712 1714 man.setflag(f, ff(f))
1713 1715 except OSError:
1714 1716 pass
1715 1717
1716 1718 for f in status.deleted + status.removed:
1717 1719 if f in man:
1718 1720 del man[f]
1719 1721
1720 1722 return man
1721 1723
1722 1724 def _buildstatus(self, other, s, match, listignored, listclean,
1723 1725 listunknown):
1724 1726 """build a status with respect to another context
1725 1727
1726 1728 This includes logic for maintaining the fast path of status when
1727 1729 comparing the working directory against its parent, which is to skip
1728 1730 building a new manifest if self (working directory) is not comparing
1729 1731 against its parent (repo['.']).
1730 1732 """
1731 1733 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1732 1734 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1733 1735 # might have accidentally ended up with the entire contents of the file
1734 1736 # they are supposed to be linking to.
1735 1737 s.modified[:] = self._filtersuspectsymlink(s.modified)
1736 1738 if other != self._repo['.']:
1737 1739 s = super(workingctx, self)._buildstatus(other, s, match,
1738 1740 listignored, listclean,
1739 1741 listunknown)
1740 1742 return s
1741 1743
1742 1744 def _matchstatus(self, other, match):
1743 1745 """override the match method with a filter for directory patterns
1744 1746
1745 1747 We use inheritance to customize the match.bad method only in cases of
1746 1748 workingctx since it belongs only to the working directory when
1747 1749 comparing against the parent changeset.
1748 1750
1749 1751 If we aren't comparing against the working directory's parent, then we
1750 1752 just use the default match object sent to us.
1751 1753 """
1752 1754 superself = super(workingctx, self)
1753 1755 match = superself._matchstatus(other, match)
1754 1756 if other != self._repo['.']:
1755 1757 def bad(f, msg):
1756 1758 # 'f' may be a directory pattern from 'match.files()',
1757 1759 # so 'f not in other' is not enough
1758 1760 if f not in other and not other.hasdir(f):
1759 1761 self._repo.ui.warn('%s: %s\n' %
1760 1762 (self._repo.dirstate.pathto(f), msg))
1761 1763 match.bad = bad
1762 1764 return match
1763 1765
1764 1766 class committablefilectx(basefilectx):
1765 1767 """A committablefilectx provides common functionality for a file context
1766 1768 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1767 1769 def __init__(self, repo, path, filelog=None, ctx=None):
1768 1770 self._repo = repo
1769 1771 self._path = path
1770 1772 self._changeid = None
1771 1773 self._filerev = self._filenode = None
1772 1774
1773 1775 if filelog is not None:
1774 1776 self._filelog = filelog
1775 1777 if ctx:
1776 1778 self._changectx = ctx
1777 1779
1778 1780 def __nonzero__(self):
1779 1781 return True
1780 1782
1781 1783 __bool__ = __nonzero__
1782 1784
1783 1785 def linkrev(self):
1784 1786 # linked to self._changectx no matter if file is modified or not
1785 1787 return self.rev()
1786 1788
1787 1789 def parents(self):
1788 1790 '''return parent filectxs, following copies if necessary'''
1789 1791 def filenode(ctx, path):
1790 1792 return ctx._manifest.get(path, nullid)
1791 1793
1792 1794 path = self._path
1793 1795 fl = self._filelog
1794 1796 pcl = self._changectx._parents
1795 1797 renamed = self.renamed()
1796 1798
1797 1799 if renamed:
1798 1800 pl = [renamed + (None,)]
1799 1801 else:
1800 1802 pl = [(path, filenode(pcl[0], path), fl)]
1801 1803
1802 1804 for pc in pcl[1:]:
1803 1805 pl.append((path, filenode(pc, path), fl))
1804 1806
1805 1807 return [self._parentfilectx(p, fileid=n, filelog=l)
1806 1808 for p, n, l in pl if n != nullid]
1807 1809
1808 1810 def children(self):
1809 1811 return []
1810 1812
1811 1813 class workingfilectx(committablefilectx):
1812 1814 """A workingfilectx object makes access to data related to a particular
1813 1815 file in the working directory convenient."""
1814 1816 def __init__(self, repo, path, filelog=None, workingctx=None):
1815 1817 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1816 1818
1817 1819 @propertycache
1818 1820 def _changectx(self):
1819 1821 return workingctx(self._repo)
1820 1822
1821 1823 def data(self):
1822 1824 return self._repo.wread(self._path)
1823 1825 def renamed(self):
1824 1826 rp = self._repo.dirstate.copied(self._path)
1825 1827 if not rp:
1826 1828 return None
1827 1829 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1828 1830
1829 1831 def size(self):
1830 1832 return self._repo.wvfs.lstat(self._path).st_size
1831 1833 def date(self):
1832 1834 t, tz = self._changectx.date()
1833 1835 try:
1834 1836 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1835 1837 except OSError as err:
1836 1838 if err.errno != errno.ENOENT:
1837 1839 raise
1838 1840 return (t, tz)
1839 1841
1840 1842 def cmp(self, fctx):
1841 1843 """compare with other file context
1842 1844
1843 1845 returns True if different from fctx.
1844 1846 """
1845 1847 # fctx should be a filectx (not a workingfilectx)
1846 1848 # invert comparison to reuse the same code path
1847 1849 return fctx.cmp(self)
1848 1850
1849 1851 def remove(self, ignoremissing=False):
1850 1852 """wraps unlink for a repo's working directory"""
1851 1853 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1852 1854
1853 1855 def write(self, data, flags):
1854 1856 """wraps repo.wwrite"""
1855 1857 self._repo.wwrite(self._path, data, flags)
1856 1858
1857 1859 class workingcommitctx(workingctx):
1858 1860 """A workingcommitctx object makes access to data related to
1859 1861 the revision being committed convenient.
1860 1862
1861 1863 This hides changes in the working directory, if they aren't
1862 1864 committed in this context.
1863 1865 """
1864 1866 def __init__(self, repo, changes,
1865 1867 text="", user=None, date=None, extra=None):
1866 1868 super(workingctx, self).__init__(repo, text, user, date, extra,
1867 1869 changes)
1868 1870
1869 1871 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1870 1872 unknown=False):
1871 1873 """Return matched files only in ``self._status``
1872 1874
1873 1875 Uncommitted files appear "clean" via this context, even if
1874 1876 they aren't actually so in the working directory.
1875 1877 """
1876 1878 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1877 1879 if clean:
1878 1880 clean = [f for f in self._manifest if f not in self._changedset]
1879 1881 else:
1880 1882 clean = []
1881 1883 return scmutil.status([f for f in self._status.modified if match(f)],
1882 1884 [f for f in self._status.added if match(f)],
1883 1885 [f for f in self._status.removed if match(f)],
1884 1886 [], [], [], clean)
1885 1887
1886 1888 @propertycache
1887 1889 def _changedset(self):
1888 1890 """Return the set of files changed in this context
1889 1891 """
1890 1892 changed = set(self._status.modified)
1891 1893 changed.update(self._status.added)
1892 1894 changed.update(self._status.removed)
1893 1895 return changed
1894 1896
1895 1897 def makecachingfilectxfn(func):
1896 1898 """Create a filectxfn that caches based on the path.
1897 1899
1898 1900 We can't use util.cachefunc because it uses all arguments as the cache
1899 1901 key and this creates a cycle since the arguments include the repo and
1900 1902 memctx.
1901 1903 """
1902 1904 cache = {}
1903 1905
1904 1906 def getfilectx(repo, memctx, path):
1905 1907 if path not in cache:
1906 1908 cache[path] = func(repo, memctx, path)
1907 1909 return cache[path]
1908 1910
1909 1911 return getfilectx
1910 1912
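
As a usage sketch only (not part of this change), the memoization above can wrap any expensive filectxfn; the callback below is hypothetical and merely illustrates that repeated lookups of the same path reuse the first result:

    def expensivefilectxfn(repo, memctx_, path):
        # pretend this does costly work, e.g. fetching file data remotely
        data = 'content of %s\n' % path
        return memfilectx(repo, path, data, memctx=memctx_)

    cachedfn = makecachingfilectxfn(expensivefilectxfn)
    # the first call for a given path runs expensivefilectxfn; later calls
    # for the same path return the cached memfilectx without re-running it
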
1911 1913 class memctx(committablectx):
1912 1914 """Use memctx to perform in-memory commits via localrepo.commitctx().
1913 1915
1914 1916 Revision information is supplied at initialization time, while
1915 1917 related file data is made available through a callback
1916 1918 mechanism. 'repo' is the current localrepo, 'parents' is a
1917 1919 sequence of two parent revision identifiers (pass None for every
1918 1920 missing parent), 'text' is the commit message and 'files' lists
1919 1921 names of files touched by the revision (normalized and relative to
1920 1922 repository root).
1921 1923
1922 1924 filectxfn(repo, memctx, path) is a callable receiving the
1923 1925 repository, the current memctx object and the normalized path of
1924 1926 requested file, relative to repository root. It is fired by the
1925 1927 commit function for every file in 'files', but the call order is
1926 1928 undefined. If the file is available in the revision being
1927 1929 committed (updated or added), filectxfn returns a memfilectx
1928 1930 object. If the file was removed, filectxfn returns None in recent
1929 1931 Mercurial. Moved files are represented by marking the source file
1930 1932 removed and the new file added with copy information (see
1931 1933 memfilectx).
1932 1934
1933 1935 'user' is the committer name and defaults to the current
1934 1936 repository username, 'date' is the commit date in any format
1935 1937 supported by util.parsedate() and defaults to the current date, and
1936 1938 'extra' is a dictionary of metadata or is left empty.
1937 1939 """
1938 1940
1939 1941 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1940 1942 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1941 1943 # this field to determine what to do in filectxfn.
1942 1944 _returnnoneformissingfiles = True
1943 1945
1944 1946 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1945 1947 date=None, extra=None, editor=False):
1946 1948 super(memctx, self).__init__(repo, text, user, date, extra)
1947 1949 self._rev = None
1948 1950 self._node = None
1949 1951 parents = [(p or nullid) for p in parents]
1950 1952 p1, p2 = parents
1951 1953 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1952 1954 files = sorted(set(files))
1953 1955 self._files = files
1954 1956 self.substate = {}
1955 1957
1956 1958 # if store is not callable, wrap it in a function
1957 1959 if not callable(filectxfn):
1958 1960 def getfilectx(repo, memctx, path):
1959 1961 fctx = filectxfn[path]
1960 1962 # this is weird but apparently we only keep track of one parent
1961 1963 # (why not only store that instead of a tuple?)
1962 1964 copied = fctx.renamed()
1963 1965 if copied:
1964 1966 copied = copied[0]
1965 1967 return memfilectx(repo, path, fctx.data(),
1966 1968 islink=fctx.islink(), isexec=fctx.isexec(),
1967 1969 copied=copied, memctx=memctx)
1968 1970 self._filectxfn = getfilectx
1969 1971 else:
1970 1972 # memoizing increases performance for e.g. vcs convert scenarios.
1971 1973 self._filectxfn = makecachingfilectxfn(filectxfn)
1972 1974
1973 1975 if extra:
1974 1976 self._extra = extra.copy()
1975 1977 else:
1976 1978 self._extra = {}
1977 1979
1978 1980 if self._extra.get('branch', '') == '':
1979 1981 self._extra['branch'] = 'default'
1980 1982
1981 1983 if editor:
1982 1984 self._text = editor(self._repo, self, [])
1983 1985 self._repo.savecommitmessage(self._text)
1984 1986
1985 1987 def filectx(self, path, filelog=None):
1986 1988 """get a file context from the working directory
1987 1989
1988 1990 Returns None if file doesn't exist and should be removed."""
1989 1991 return self._filectxfn(self._repo, self, path)
1990 1992
1991 1993 def commit(self):
1992 1994 """commit context to the repo"""
1993 1995 return self._repo.commitctx(self)
1994 1996
1995 1997 @propertycache
1996 1998 def _manifest(self):
1997 1999 """generate a manifest based on the return values of filectxfn"""
1998 2000
1999 2001 # keep this simple for now; just worry about p1
2000 2002 pctx = self._parents[0]
2001 2003 man = pctx.manifest().copy()
2002 2004
2003 2005 for f in self._status.modified:
2004 2006 p1node = nullid
2005 2007 p2node = nullid
2006 2008 p = pctx[f].parents() # if file isn't in pctx, check p2?
2007 2009 if len(p) > 0:
2008 2010 p1node = p[0].filenode()
2009 2011 if len(p) > 1:
2010 2012 p2node = p[1].filenode()
2011 2013 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2012 2014
2013 2015 for f in self._status.added:
2014 2016 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2015 2017
2016 2018 for f in self._status.removed:
2017 2019 if f in man:
2018 2020 del man[f]
2019 2021
2020 2022 return man
2021 2023
2022 2024 @propertycache
2023 2025 def _status(self):
2024 2026 """Calculate exact status from ``files`` specified at construction
2025 2027 """
2026 2028 man1 = self.p1().manifest()
2027 2029 p2 = self._parents[1]
2028 2030 # "1 < len(self._parents)" can't be used for checking
2029 2031 # existence of the 2nd parent, because "memctx._parents" is
2030 2032 # explicitly initialized as a list whose length is 2.
2031 2033 if p2.node() != nullid:
2032 2034 man2 = p2.manifest()
2033 2035 managing = lambda f: f in man1 or f in man2
2034 2036 else:
2035 2037 managing = lambda f: f in man1
2036 2038
2037 2039 modified, added, removed = [], [], []
2038 2040 for f in self._files:
2039 2041 if not managing(f):
2040 2042 added.append(f)
2041 2043 elif self[f]:
2042 2044 modified.append(f)
2043 2045 else:
2044 2046 removed.append(f)
2045 2047
2046 2048 return scmutil.status(modified, added, removed, [], [], [], [])
2047 2049
2048 2050 class memfilectx(committablefilectx):
2049 2051 """memfilectx represents an in-memory file to commit.
2050 2052
2051 2053 See memctx and committablefilectx for more details.
2052 2054 """
2053 2055 def __init__(self, repo, path, data, islink=False,
2054 2056 isexec=False, copied=None, memctx=None):
2055 2057 """
2056 2058 path is the normalized file path relative to repository root.
2057 2059 data is the file content as a string.
2058 2060 islink is True if the file is a symbolic link.
2059 2061 isexec is True if the file is executable.
2060 2062 copied is the source file path if current file was copied in the
2061 2063 revision being committed, or None."""
2062 2064 super(memfilectx, self).__init__(repo, path, None, memctx)
2063 2065 self._data = data
2064 2066 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2065 2067 self._copied = None
2066 2068 if copied:
2067 2069 self._copied = (copied, nullid)
2068 2070
2069 2071 def data(self):
2070 2072 return self._data
2071 2073
2072 2074 def remove(self, ignoremissing=False):
2073 2075 """wraps unlink for a repo's working directory"""
2074 2076 # need to figure out what to do here
2075 2077 del self._changectx[self._path]
2076 2078
2077 2079 def write(self, data, flags):
2078 2080 """wraps repo.wwrite"""
2079 2081 self._data = data
2080 2082
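
As a rough usage sketch (assuming 'repo' is an already-open localrepo and that the names above are available, e.g. imported from mercurial.context), an in-memory commit adding a single hypothetical file could look like this; the file name and content are illustrative only:

    def filectxfn(repo, memctx_, path):
        if path == 'hello.txt':
            return memfilectx(repo, path, 'hello world\n', memctx=memctx_)
        return None  # anything else listed in 'files' is treated as removed

    ctx = memctx(repo, (repo['.'].node(), None), 'add hello.txt in memory',
                 ['hello.txt'], filectxfn, user='alice <alice@example.com>')
    newnode = ctx.commit()  # delegates to repo.commitctx(ctx)
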
2083 class overlayfilectx(committablefilectx):
2084 """Like memfilectx but take an original filectx and optional parameters to
2085 override parts of it. This is useful when fctx.data() is expensive (i.e.
2086 flag processor is expensive) and raw data, flags, and filenode could be
2087 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2088 """
2089
2090 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2091 copied=None, ctx=None):
2092 """originalfctx: filecontext to duplicate
2093
2094 datafunc: None or a function to override data (file content). It is a
2095 function so the data is computed lazily. path, flags, copied, ctx: None or an overridden value
2096
2097 copied could be (path, rev), or False. copied could also be just path,
2098 and will be converted to (path, nullid). This simplifies some callers.
2099 """
2100
2101 if path is None:
2102 path = originalfctx.path()
2103 if ctx is None:
2104 ctx = originalfctx.changectx()
2105 ctxmatch = lambda: True
2106 else:
2107 ctxmatch = lambda: ctx == originalfctx.changectx()
2108
2109 repo = originalfctx.repo()
2110 flog = originalfctx.filelog()
2111 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2112
2113 if copied is None:
2114 copied = originalfctx.renamed()
2115 copiedmatch = lambda: True
2116 else:
2117 if copied and not isinstance(copied, tuple):
2118 # repo._filecommit will recalculate copyrev so nullid is okay
2119 copied = (copied, nullid)
2120 copiedmatch = lambda: copied == originalfctx.renamed()
2121
2122 # When data, copied (could affect data), ctx (could affect filelog
2123 # parents) are not overridden, rawdata, rawflags, and filenode may be
2124 # reused (repo._filecommit should double check filelog parents).
2125 #
2126 # path, flags are not hashed in filelog (but in manifestlog) so they do
2127 # not affect reusability here.
2128 #
2129 # If ctx or copied is overridden to the same value as in originalfctx,
2130 # it is still considered reusable. originalfctx.renamed() may be a bit
2131 # expensive so it's not called unless necessary. Assuming datafunc is
2132 # always expensive, do not call it for this "reusable" test.
2133 reusable = datafunc is None and ctxmatch() and copiedmatch()
2134
2135 if datafunc is None:
2136 datafunc = originalfctx.data
2137 if flags is None:
2138 flags = originalfctx.flags()
2139
2140 self._datafunc = datafunc
2141 self._flags = flags
2142 self._copied = copied
2143
2144 if reusable:
2145 # copy extra fields from originalfctx
2146 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2147 for attr in attrs:
2148 if util.safehasattr(originalfctx, attr):
2149 setattr(self, attr, getattr(originalfctx, attr))
2150
2151 def data(self):
2152 return self._datafunc()
2153
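
A minimal sketch of how this class might be used (assuming 'repo' is an open localrepo and 'hello.txt' is a tracked file; both names are hypothetical):

    fctx = repo['.']['hello.txt']
    # flip only the executable bit; raw data and filenode can be reused
    execfctx = overlayfilectx(fctx, flags='x')
    # override the content instead; passing a function keeps the read lazy
    newfctx = overlayfilectx(fctx, datafunc=lambda: 'new content\n')
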
2081 2154 class metadataonlyctx(committablectx):
2082 2155 """Like memctx but it's reusing the manifest of different commit.
2083 2156 Intended to be used by lightweight operations that are creating
2084 2157 metadata-only changes.
2085 2158
2086 2159 Revision information is supplied at initialization time. 'repo' is the
2087 2160 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2088 2161 'parents' is a sequence of two parent revision identifiers (pass None for
2089 2162 every missing parent), and 'text' is the commit message.
2090 2163
2091 2164 'user' is the committer name and defaults to the current repository
2092 2165 username, 'date' is the commit date in any format supported by
2093 2166 util.parsedate() and defaults to the current date, and 'extra' is a
2094 2167 dictionary of metadata or is left empty.
2095 2168 """
2096 2169 def __new__(cls, repo, originalctx, *args, **kwargs):
2097 2170 return super(metadataonlyctx, cls).__new__(cls, repo)
2098 2171
2099 2172 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2100 2173 extra=None, editor=False):
2101 2174 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2102 2175 self._rev = None
2103 2176 self._node = None
2104 2177 self._originalctx = originalctx
2105 2178 self._manifestnode = originalctx.manifestnode()
2106 2179 parents = [(p or nullid) for p in parents]
2107 2180 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2108 2181
2109 2182 # sanity check to ensure that the reused manifest parents are
2110 2183 # manifests of our commit parents
2111 2184 mp1, mp2 = self.manifestctx().parents
2112 2185 if p1 != nullid and p1.manifestnode() != mp1:
2113 2186 raise RuntimeError('can\'t reuse the manifest: '
2114 2187 'its p1 doesn\'t match the new ctx p1')
2115 2188 if p2 != nullid and p2.manifestnode() != mp2:
2116 2189 raise RuntimeError('can\'t reuse the manifest: '
2117 2190 'its p2 doesn\'t match the new ctx p2')
2118 2191
2119 2192 self._files = originalctx.files()
2120 2193 self.substate = {}
2121 2194
2122 2195 if extra:
2123 2196 self._extra = extra.copy()
2124 2197 else:
2125 2198 self._extra = {}
2126 2199
2127 2200 if self._extra.get('branch', '') == '':
2128 2201 self._extra['branch'] = 'default'
2129 2202
2130 2203 if editor:
2131 2204 self._text = editor(self._repo, self, [])
2132 2205 self._repo.savecommitmessage(self._text)
2133 2206
2134 2207 def manifestnode(self):
2135 2208 return self._manifestnode
2136 2209
2137 2210 @propertycache
2138 2211 def _manifestctx(self):
2139 2212 return self._repo.manifestlog[self._manifestnode]
2140 2213
2141 2214 def filectx(self, path, filelog=None):
2142 2215 return self._originalctx.filectx(path, filelog=filelog)
2143 2216
2144 2217 def commit(self):
2145 2218 """commit context to the repo"""
2146 2219 return self._repo.commitctx(self)
2147 2220
2148 2221 @property
2149 2222 def _manifest(self):
2150 2223 return self._originalctx.manifest()
2151 2224
2152 2225 @propertycache
2153 2226 def _status(self):
2154 2227 """Calculate exact status from ``files`` specified in the ``origctx``
2155 2228 and the parents' manifests.
2156 2229 """
2157 2230 man1 = self.p1().manifest()
2158 2231 p2 = self._parents[1]
2159 2232 # "1 < len(self._parents)" can't be used for checking
2160 2233 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2161 2234 # explicitly initialized as a list whose length is 2.
2162 2235 if p2.node() != nullid:
2163 2236 man2 = p2.manifest()
2164 2237 managing = lambda f: f in man1 or f in man2
2165 2238 else:
2166 2239 managing = lambda f: f in man1
2167 2240
2168 2241 modified, added, removed = [], [], []
2169 2242 for f in self._files:
2170 2243 if not managing(f):
2171 2244 added.append(f)
2172 2245 elif self[f]:
2173 2246 modified.append(f)
2174 2247 else:
2175 2248 removed.append(f)
2176 2249
2177 2250 return scmutil.status(modified, added, removed, [], [], [], [])
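
As an illustrative sketch (again assuming an open localrepo named 'repo'), a metadata-only rewrite of the tip commit could reuse its manifest like this; replacing or obsoleting the original changeset afterwards is up to the caller and is not shown:

    old = repo['tip']
    # keep the manifest and file list of 'old', change only the commit message
    newctx = metadataonlyctx(repo, old, (old.p1().node(), old.p2().node()),
                             'reworded commit message',
                             user=old.user(), date=old.date(),
                             extra=old.extra())
    newnode = newctx.commit()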