context: add a `blockancestors(fctx, fromline, toline)` function...
Denis Laxalde
r30718:ce662ee4 default
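
This changeset adds a module-level blockancestors() generator to context.py: given a file context and a line range, it walks back through the file's ancestors and yields those whose changes touch that block of lines. A minimal usage sketch follows (not part of the patch; the repository path, file name, and 10-20 line range are made-up examples, and the exact line-range convention is whatever mdiff.blocksinrange expects):

    # Hypothetical sketch: follow a block of lines back through a file's history.
    # Assumes a Mercurial checkout at "." that tracks "mercurial/context.py";
    # the 10-20 range below is an arbitrary example.
    from mercurial import hg, ui as uimod, context

    repo = hg.repository(uimod.ui(), '.')
    fctx = repo['.']['mercurial/context.py']  # filectx at the working parent
    # blockancestors() starts from fctx and yields the ancestor file contexts
    # whose changes overlap the given line block.
    touching = [c.rev() for c in context.blockancestors(fctx, 10, 20)]
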
@@ -1,2075 +1,2111 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 newnodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 )
27 27 from . import (
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 mdiff,
33 33 obsolete as obsmod,
34 34 patch,
35 35 phases,
36 36 repoview,
37 37 revlog,
38 38 scmutil,
39 39 subrepo,
40 40 util,
41 41 )
42 42
43 43 propertycache = util.propertycache
44 44
45 45 nonascii = re.compile(r'[^\x21-\x7f]').search
46 46
47 47 class basectx(object):
48 48 """A basectx object represents the common logic for its children:
49 49 changectx: read-only context that is already present in the repo,
50 50 workingctx: a context that represents the working directory and can
51 51 be committed,
52 52 memctx: a context that represents changes in-memory and can also
53 53 be committed."""
54 54 def __new__(cls, repo, changeid='', *args, **kwargs):
55 55 if isinstance(changeid, basectx):
56 56 return changeid
57 57
58 58 o = super(basectx, cls).__new__(cls)
59 59
60 60 o._repo = repo
61 61 o._rev = nullrev
62 62 o._node = nullid
63 63
64 64 return o
65 65
66 66 def __str__(self):
67 67 return short(self.node())
68 68
69 69 def __int__(self):
70 70 return self.rev()
71 71
72 72 def __repr__(self):
73 73 return "<%s %s>" % (type(self).__name__, str(self))
74 74
75 75 def __eq__(self, other):
76 76 try:
77 77 return type(self) == type(other) and self._rev == other._rev
78 78 except AttributeError:
79 79 return False
80 80
81 81 def __ne__(self, other):
82 82 return not (self == other)
83 83
84 84 def __contains__(self, key):
85 85 return key in self._manifest
86 86
87 87 def __getitem__(self, key):
88 88 return self.filectx(key)
89 89
90 90 def __iter__(self):
91 91 return iter(self._manifest)
92 92
93 93 def _manifestmatches(self, match, s):
94 94 """generate a new manifest filtered by the match argument
95 95
96 96 This method is for internal use only and mainly exists to provide an
97 97 object oriented way for other contexts to customize the manifest
98 98 generation.
99 99 """
100 100 return self.manifest().matches(match)
101 101
102 102 def _matchstatus(self, other, match):
103 103 """return match.always if match is none
104 104
105 105 This internal method provides a way for child objects to override the
106 106 match operator.
107 107 """
108 108 return match or matchmod.always(self._repo.root, self._repo.getcwd())
109 109
110 110 def _buildstatus(self, other, s, match, listignored, listclean,
111 111 listunknown):
112 112 """build a status with respect to another context"""
113 113 # Load earliest manifest first for caching reasons. More specifically,
114 114 # if you have revisions 1000 and 1001, 1001 is probably stored as a
115 115 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
116 116 # 1000 and cache it so that when you read 1001, we just need to apply a
117 117 # delta to what's in the cache. So that's one full reconstruction + one
118 118 # delta application.
119 119 if self.rev() is not None and self.rev() < other.rev():
120 120 self.manifest()
121 121 mf1 = other._manifestmatches(match, s)
122 122 mf2 = self._manifestmatches(match, s)
123 123
124 124 modified, added = [], []
125 125 removed = []
126 126 clean = []
127 127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
128 128 deletedset = set(deleted)
129 129 d = mf1.diff(mf2, clean=listclean)
130 130 for fn, value in d.iteritems():
131 131 if fn in deletedset:
132 132 continue
133 133 if value is None:
134 134 clean.append(fn)
135 135 continue
136 136 (node1, flag1), (node2, flag2) = value
137 137 if node1 is None:
138 138 added.append(fn)
139 139 elif node2 is None:
140 140 removed.append(fn)
141 141 elif flag1 != flag2:
142 142 modified.append(fn)
143 143 elif node2 != newnodeid:
144 144 # When comparing files between two commits, we save time by
145 145 # not comparing the file contents when the nodeids differ.
146 146 # Note that this means we incorrectly report a reverted change
147 147 # to a file as a modification.
148 148 modified.append(fn)
149 149 elif self[fn].cmp(other[fn]):
150 150 modified.append(fn)
151 151 else:
152 152 clean.append(fn)
153 153
154 154 if removed:
155 155 # need to filter files if they are already reported as removed
156 156 unknown = [fn for fn in unknown if fn not in mf1]
157 157 ignored = [fn for fn in ignored if fn not in mf1]
158 158 # if they're deleted, don't report them as removed
159 159 removed = [fn for fn in removed if fn not in deletedset]
160 160
161 161 return scmutil.status(modified, added, removed, deleted, unknown,
162 162 ignored, clean)
163 163
164 164 @propertycache
165 165 def substate(self):
166 166 return subrepo.state(self, self._repo.ui)
167 167
168 168 def subrev(self, subpath):
169 169 return self.substate[subpath][1]
170 170
171 171 def rev(self):
172 172 return self._rev
173 173 def node(self):
174 174 return self._node
175 175 def hex(self):
176 176 return hex(self.node())
177 177 def manifest(self):
178 178 return self._manifest
179 179 def manifestctx(self):
180 180 return self._manifestctx
181 181 def repo(self):
182 182 return self._repo
183 183 def phasestr(self):
184 184 return phases.phasenames[self.phase()]
185 185 def mutable(self):
186 186 return self.phase() > phases.public
187 187
188 188 def getfileset(self, expr):
189 189 return fileset.getfileset(self, expr)
190 190
191 191 def obsolete(self):
192 192 """True if the changeset is obsolete"""
193 193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194 194
195 195 def extinct(self):
196 196 """True if the changeset is extinct"""
197 197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198 198
199 199 def unstable(self):
200 200 """True if the changeset is not obsolete but it's ancestor are"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202 202
203 203 def bumped(self):
204 204 """True if the changeset try to be a successor of a public changeset
205 205
206 206 Only non-public and non-obsolete changesets may be bumped.
207 207 """
208 208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209 209
210 210 def divergent(self):
211 211 """Is a successors of a changeset with multiple possible successors set
212 212
213 213 Only non-public and non-obsolete changesets may be divergent.
214 214 """
215 215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216 216
217 217 def troubled(self):
218 218 """True if the changeset is either unstable, bumped or divergent"""
219 219 return self.unstable() or self.bumped() or self.divergent()
220 220
221 221 def troubles(self):
222 222 """return the list of troubles affecting this changesets.
223 223
224 224 Troubles are returned as strings. possible values are:
225 225 - unstable,
226 226 - bumped,
227 227 - divergent.
228 228 """
229 229 troubles = []
230 230 if self.unstable():
231 231 troubles.append('unstable')
232 232 if self.bumped():
233 233 troubles.append('bumped')
234 234 if self.divergent():
235 235 troubles.append('divergent')
236 236 return troubles
237 237
238 238 def parents(self):
239 239 """return contexts for each parent changeset"""
240 240 return self._parents
241 241
242 242 def p1(self):
243 243 return self._parents[0]
244 244
245 245 def p2(self):
246 246 parents = self._parents
247 247 if len(parents) == 2:
248 248 return parents[1]
249 249 return changectx(self._repo, nullrev)
250 250
251 251 def _fileinfo(self, path):
252 252 if '_manifest' in self.__dict__:
253 253 try:
254 254 return self._manifest[path], self._manifest.flags(path)
255 255 except KeyError:
256 256 raise error.ManifestLookupError(self._node, path,
257 257 _('not found in manifest'))
258 258 if '_manifestdelta' in self.__dict__ or path in self.files():
259 259 if path in self._manifestdelta:
260 260 return (self._manifestdelta[path],
261 261 self._manifestdelta.flags(path))
262 262 mfl = self._repo.manifestlog
263 263 try:
264 264 node, flag = mfl[self._changeset.manifest].find(path)
265 265 except KeyError:
266 266 raise error.ManifestLookupError(self._node, path,
267 267 _('not found in manifest'))
268 268
269 269 return node, flag
270 270
271 271 def filenode(self, path):
272 272 return self._fileinfo(path)[0]
273 273
274 274 def flags(self, path):
275 275 try:
276 276 return self._fileinfo(path)[1]
277 277 except error.LookupError:
278 278 return ''
279 279
280 280 def sub(self, path, allowcreate=True):
281 281 '''return a subrepo for the stored revision of path, never wdir()'''
282 282 return subrepo.subrepo(self, path, allowcreate=allowcreate)
283 283
284 284 def nullsub(self, path, pctx):
285 285 return subrepo.nullsubrepo(self, path, pctx)
286 286
287 287 def workingsub(self, path):
288 288 '''return a subrepo for the stored revision, or wdir if this is a wdir
289 289 context.
290 290 '''
291 291 return subrepo.subrepo(self, path, allowwdir=True)
292 292
293 293 def match(self, pats=[], include=None, exclude=None, default='glob',
294 294 listsubrepos=False, badfn=None):
295 295 r = self._repo
296 296 return matchmod.match(r.root, r.getcwd(), pats,
297 297 include, exclude, default,
298 298 auditor=r.nofsauditor, ctx=self,
299 299 listsubrepos=listsubrepos, badfn=badfn)
300 300
301 301 def diff(self, ctx2=None, match=None, **opts):
302 302 """Returns a diff generator for the given contexts and matcher"""
303 303 if ctx2 is None:
304 304 ctx2 = self.p1()
305 305 if ctx2 is not None:
306 306 ctx2 = self._repo[ctx2]
307 307 diffopts = patch.diffopts(self._repo.ui, opts)
308 308 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
309 309
310 310 def dirs(self):
311 311 return self._manifest.dirs()
312 312
313 313 def hasdir(self, dir):
314 314 return self._manifest.hasdir(dir)
315 315
316 316 def dirty(self, missing=False, merge=True, branch=True):
317 317 return False
318 318
319 319 def status(self, other=None, match=None, listignored=False,
320 320 listclean=False, listunknown=False, listsubrepos=False):
321 321 """return status of files between two nodes or node and working
322 322 directory.
323 323
324 324 If other is None, compare this node with working directory.
325 325
326 326 returns (modified, added, removed, deleted, unknown, ignored, clean)
327 327 """
328 328
329 329 ctx1 = self
330 330 ctx2 = self._repo[other]
331 331
332 332 # This next code block is, admittedly, fragile logic that tests for
333 333 # reversing the contexts and wouldn't need to exist if it weren't for
334 334 # the fast (and common) code path of comparing the working directory
335 335 # with its first parent.
336 336 #
337 337 # What we're aiming for here is the ability to call:
338 338 #
339 339 # workingctx.status(parentctx)
340 340 #
341 341 # If we always built the manifest for each context and compared those,
342 342 # then we'd be done. But the special case of the above call means we
343 343 # just copy the manifest of the parent.
344 344 reversed = False
345 345 if (not isinstance(ctx1, changectx)
346 346 and isinstance(ctx2, changectx)):
347 347 reversed = True
348 348 ctx1, ctx2 = ctx2, ctx1
349 349
350 350 match = ctx2._matchstatus(ctx1, match)
351 351 r = scmutil.status([], [], [], [], [], [], [])
352 352 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
353 353 listunknown)
354 354
355 355 if reversed:
356 356 # Reverse added and removed. Clear deleted, unknown and ignored as
357 357 # these make no sense to reverse.
358 358 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
359 359 r.clean)
360 360
361 361 if listsubrepos:
362 362 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
363 363 try:
364 364 rev2 = ctx2.subrev(subpath)
365 365 except KeyError:
366 366 # A subrepo that existed in node1 was deleted between
367 367 # node1 and node2 (inclusive). Thus, ctx2's substate
368 368 # won't contain that subpath. The best we can do is ignore it.
369 369 rev2 = None
370 370 submatch = matchmod.subdirmatcher(subpath, match)
371 371 s = sub.status(rev2, match=submatch, ignored=listignored,
372 372 clean=listclean, unknown=listunknown,
373 373 listsubrepos=True)
374 374 for rfiles, sfiles in zip(r, s):
375 375 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
376 376
377 377 for l in r:
378 378 l.sort()
379 379
380 380 return r
381 381
382 382
383 383 def makememctx(repo, parents, text, user, date, branch, files, store,
384 384 editor=None, extra=None):
385 385 def getfilectx(repo, memctx, path):
386 386 data, mode, copied = store.getfile(path)
387 387 if data is None:
388 388 return None
389 389 islink, isexec = mode
390 390 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
391 391 copied=copied, memctx=memctx)
392 392 if extra is None:
393 393 extra = {}
394 394 if branch:
395 395 extra['branch'] = encoding.fromlocal(branch)
396 396 ctx = memctx(repo, parents, text, files, getfilectx, user,
397 397 date, extra, editor)
398 398 return ctx
399 399
400 400 class changectx(basectx):
401 401 """A changecontext object makes access to data related to a particular
402 402 changeset convenient. It represents a read-only context already present in
403 403 the repo."""
404 404 def __init__(self, repo, changeid=''):
405 405 """changeid is a revision number, node, or tag"""
406 406
407 407 # since basectx.__new__ already took care of copying the object, we
408 408 # don't need to do anything in __init__, so we just exit here
409 409 if isinstance(changeid, basectx):
410 410 return
411 411
412 412 if changeid == '':
413 413 changeid = '.'
414 414 self._repo = repo
415 415
416 416 try:
417 417 if isinstance(changeid, int):
418 418 self._node = repo.changelog.node(changeid)
419 419 self._rev = changeid
420 420 return
421 421 if isinstance(changeid, long):
422 422 changeid = str(changeid)
423 423 if changeid == 'null':
424 424 self._node = nullid
425 425 self._rev = nullrev
426 426 return
427 427 if changeid == 'tip':
428 428 self._node = repo.changelog.tip()
429 429 self._rev = repo.changelog.rev(self._node)
430 430 return
431 431 if changeid == '.' or changeid == repo.dirstate.p1():
432 432 # this is a hack to delay/avoid loading obsmarkers
433 433 # when we know that '.' won't be hidden
434 434 self._node = repo.dirstate.p1()
435 435 self._rev = repo.unfiltered().changelog.rev(self._node)
436 436 return
437 437 if len(changeid) == 20:
438 438 try:
439 439 self._node = changeid
440 440 self._rev = repo.changelog.rev(changeid)
441 441 return
442 442 except error.FilteredRepoLookupError:
443 443 raise
444 444 except LookupError:
445 445 pass
446 446
447 447 try:
448 448 r = int(changeid)
449 449 if str(r) != changeid:
450 450 raise ValueError
451 451 l = len(repo.changelog)
452 452 if r < 0:
453 453 r += l
454 454 if r < 0 or r >= l:
455 455 raise ValueError
456 456 self._rev = r
457 457 self._node = repo.changelog.node(r)
458 458 return
459 459 except error.FilteredIndexError:
460 460 raise
461 461 except (ValueError, OverflowError, IndexError):
462 462 pass
463 463
464 464 if len(changeid) == 40:
465 465 try:
466 466 self._node = bin(changeid)
467 467 self._rev = repo.changelog.rev(self._node)
468 468 return
469 469 except error.FilteredLookupError:
470 470 raise
471 471 except (TypeError, LookupError):
472 472 pass
473 473
474 474 # lookup bookmarks through the name interface
475 475 try:
476 476 self._node = repo.names.singlenode(repo, changeid)
477 477 self._rev = repo.changelog.rev(self._node)
478 478 return
479 479 except KeyError:
480 480 pass
481 481 except error.FilteredRepoLookupError:
482 482 raise
483 483 except error.RepoLookupError:
484 484 pass
485 485
486 486 self._node = repo.unfiltered().changelog._partialmatch(changeid)
487 487 if self._node is not None:
488 488 self._rev = repo.changelog.rev(self._node)
489 489 return
490 490
491 491 # lookup failed
492 492 # check if it might have come from damaged dirstate
493 493 #
494 494 # XXX we could avoid the unfiltered if we had a recognizable
495 495 # exception for filtered changeset access
496 496 if changeid in repo.unfiltered().dirstate.parents():
497 497 msg = _("working directory has unknown parent '%s'!")
498 498 raise error.Abort(msg % short(changeid))
499 499 try:
500 500 if len(changeid) == 20 and nonascii(changeid):
501 501 changeid = hex(changeid)
502 502 except TypeError:
503 503 pass
504 504 except (error.FilteredIndexError, error.FilteredLookupError,
505 505 error.FilteredRepoLookupError):
506 506 if repo.filtername.startswith('visible'):
507 507 msg = _("hidden revision '%s'") % changeid
508 508 hint = _('use --hidden to access hidden revisions')
509 509 raise error.FilteredRepoLookupError(msg, hint=hint)
510 510 msg = _("filtered revision '%s' (not in '%s' subset)")
511 511 msg %= (changeid, repo.filtername)
512 512 raise error.FilteredRepoLookupError(msg)
513 513 except IndexError:
514 514 pass
515 515 raise error.RepoLookupError(
516 516 _("unknown revision '%s'") % changeid)
517 517
518 518 def __hash__(self):
519 519 try:
520 520 return hash(self._rev)
521 521 except AttributeError:
522 522 return id(self)
523 523
524 524 def __nonzero__(self):
525 525 return self._rev != nullrev
526 526
527 527 @propertycache
528 528 def _changeset(self):
529 529 return self._repo.changelog.changelogrevision(self.rev())
530 530
531 531 @propertycache
532 532 def _manifest(self):
533 533 return self._manifestctx.read()
534 534
535 535 @propertycache
536 536 def _manifestctx(self):
537 537 return self._repo.manifestlog[self._changeset.manifest]
538 538
539 539 @propertycache
540 540 def _manifestdelta(self):
541 541 return self._manifestctx.readdelta()
542 542
543 543 @propertycache
544 544 def _parents(self):
545 545 repo = self._repo
546 546 p1, p2 = repo.changelog.parentrevs(self._rev)
547 547 if p2 == nullrev:
548 548 return [changectx(repo, p1)]
549 549 return [changectx(repo, p1), changectx(repo, p2)]
550 550
551 551 def changeset(self):
552 552 c = self._changeset
553 553 return (
554 554 c.manifest,
555 555 c.user,
556 556 c.date,
557 557 c.files,
558 558 c.description,
559 559 c.extra,
560 560 )
561 561 def manifestnode(self):
562 562 return self._changeset.manifest
563 563
564 564 def user(self):
565 565 return self._changeset.user
566 566 def date(self):
567 567 return self._changeset.date
568 568 def files(self):
569 569 return self._changeset.files
570 570 def description(self):
571 571 return self._changeset.description
572 572 def branch(self):
573 573 return encoding.tolocal(self._changeset.extra.get("branch"))
574 574 def closesbranch(self):
575 575 return 'close' in self._changeset.extra
576 576 def extra(self):
577 577 return self._changeset.extra
578 578 def tags(self):
579 579 return self._repo.nodetags(self._node)
580 580 def bookmarks(self):
581 581 return self._repo.nodebookmarks(self._node)
582 582 def phase(self):
583 583 return self._repo._phasecache.phase(self._repo, self._rev)
584 584 def hidden(self):
585 585 return self._rev in repoview.filterrevs(self._repo, 'visible')
586 586
587 587 def children(self):
588 588 """return contexts for each child changeset"""
589 589 c = self._repo.changelog.children(self._node)
590 590 return [changectx(self._repo, x) for x in c]
591 591
592 592 def ancestors(self):
593 593 for a in self._repo.changelog.ancestors([self._rev]):
594 594 yield changectx(self._repo, a)
595 595
596 596 def descendants(self):
597 597 for d in self._repo.changelog.descendants([self._rev]):
598 598 yield changectx(self._repo, d)
599 599
600 600 def filectx(self, path, fileid=None, filelog=None):
601 601 """get a file context from this changeset"""
602 602 if fileid is None:
603 603 fileid = self.filenode(path)
604 604 return filectx(self._repo, path, fileid=fileid,
605 605 changectx=self, filelog=filelog)
606 606
607 607 def ancestor(self, c2, warn=False):
608 608 """return the "best" ancestor context of self and c2
609 609
610 610 If there are multiple candidates, it will show a message and check
611 611 merge.preferancestor configuration before falling back to the
612 612 revlog ancestor."""
613 613 # deal with workingctxs
614 614 n2 = c2._node
615 615 if n2 is None:
616 616 n2 = c2._parents[0]._node
617 617 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
618 618 if not cahs:
619 619 anc = nullid
620 620 elif len(cahs) == 1:
621 621 anc = cahs[0]
622 622 else:
623 623 # experimental config: merge.preferancestor
624 624 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
625 625 try:
626 626 ctx = changectx(self._repo, r)
627 627 except error.RepoLookupError:
628 628 continue
629 629 anc = ctx.node()
630 630 if anc in cahs:
631 631 break
632 632 else:
633 633 anc = self._repo.changelog.ancestor(self._node, n2)
634 634 if warn:
635 635 self._repo.ui.status(
636 636 (_("note: using %s as ancestor of %s and %s\n") %
637 637 (short(anc), short(self._node), short(n2))) +
638 638 ''.join(_(" alternatively, use --config "
639 639 "merge.preferancestor=%s\n") %
640 640 short(n) for n in sorted(cahs) if n != anc))
641 641 return changectx(self._repo, anc)
642 642
643 643 def descendant(self, other):
644 644 """True if other is descendant of this changeset"""
645 645 return self._repo.changelog.descendant(self._rev, other._rev)
646 646
647 647 def walk(self, match):
648 648 '''Generates matching file names.'''
649 649
650 650 # Wrap match.bad method to have message with nodeid
651 651 def bad(fn, msg):
652 652 # The manifest doesn't know about subrepos, so don't complain about
653 653 # paths into valid subrepos.
654 654 if any(fn == s or fn.startswith(s + '/')
655 655 for s in self.substate):
656 656 return
657 657 match.bad(fn, _('no such file in rev %s') % self)
658 658
659 659 m = matchmod.badmatch(match, bad)
660 660 return self._manifest.walk(m)
661 661
662 662 def matches(self, match):
663 663 return self.walk(match)
664 664
665 665 class basefilectx(object):
666 666 """A filecontext object represents the common logic for its children:
667 667 filectx: read-only access to a filerevision that is already present
668 668 in the repo,
669 669 workingfilectx: a filecontext that represents files from the working
670 670 directory,
671 671 memfilectx: a filecontext that represents files in-memory."""
672 672 def __new__(cls, repo, path, *args, **kwargs):
673 673 return super(basefilectx, cls).__new__(cls)
674 674
675 675 @propertycache
676 676 def _filelog(self):
677 677 return self._repo.file(self._path)
678 678
679 679 @propertycache
680 680 def _changeid(self):
681 681 if '_changeid' in self.__dict__:
682 682 return self._changeid
683 683 elif '_changectx' in self.__dict__:
684 684 return self._changectx.rev()
685 685 elif '_descendantrev' in self.__dict__:
686 686 # this file context was created from a revision with a known
687 687 # descendant, we can (lazily) correct for linkrev aliases
688 688 return self._adjustlinkrev(self._descendantrev)
689 689 else:
690 690 return self._filelog.linkrev(self._filerev)
691 691
692 692 @propertycache
693 693 def _filenode(self):
694 694 if '_fileid' in self.__dict__:
695 695 return self._filelog.lookup(self._fileid)
696 696 else:
697 697 return self._changectx.filenode(self._path)
698 698
699 699 @propertycache
700 700 def _filerev(self):
701 701 return self._filelog.rev(self._filenode)
702 702
703 703 @propertycache
704 704 def _repopath(self):
705 705 return self._path
706 706
707 707 def __nonzero__(self):
708 708 try:
709 709 self._filenode
710 710 return True
711 711 except error.LookupError:
712 712 # file is missing
713 713 return False
714 714
715 715 def __str__(self):
716 716 try:
717 717 return "%s@%s" % (self.path(), self._changectx)
718 718 except error.LookupError:
719 719 return "%s@???" % self.path()
720 720
721 721 def __repr__(self):
722 722 return "<%s %s>" % (type(self).__name__, str(self))
723 723
724 724 def __hash__(self):
725 725 try:
726 726 return hash((self._path, self._filenode))
727 727 except AttributeError:
728 728 return id(self)
729 729
730 730 def __eq__(self, other):
731 731 try:
732 732 return (type(self) == type(other) and self._path == other._path
733 733 and self._filenode == other._filenode)
734 734 except AttributeError:
735 735 return False
736 736
737 737 def __ne__(self, other):
738 738 return not (self == other)
739 739
740 740 def filerev(self):
741 741 return self._filerev
742 742 def filenode(self):
743 743 return self._filenode
744 744 def flags(self):
745 745 return self._changectx.flags(self._path)
746 746 def filelog(self):
747 747 return self._filelog
748 748 def rev(self):
749 749 return self._changeid
750 750 def linkrev(self):
751 751 return self._filelog.linkrev(self._filerev)
752 752 def node(self):
753 753 return self._changectx.node()
754 754 def hex(self):
755 755 return self._changectx.hex()
756 756 def user(self):
757 757 return self._changectx.user()
758 758 def date(self):
759 759 return self._changectx.date()
760 760 def files(self):
761 761 return self._changectx.files()
762 762 def description(self):
763 763 return self._changectx.description()
764 764 def branch(self):
765 765 return self._changectx.branch()
766 766 def extra(self):
767 767 return self._changectx.extra()
768 768 def phase(self):
769 769 return self._changectx.phase()
770 770 def phasestr(self):
771 771 return self._changectx.phasestr()
772 772 def manifest(self):
773 773 return self._changectx.manifest()
774 774 def changectx(self):
775 775 return self._changectx
776 776 def repo(self):
777 777 return self._repo
778 778
779 779 def path(self):
780 780 return self._path
781 781
782 782 def isbinary(self):
783 783 try:
784 784 return util.binary(self.data())
785 785 except IOError:
786 786 return False
787 787 def isexec(self):
788 788 return 'x' in self.flags()
789 789 def islink(self):
790 790 return 'l' in self.flags()
791 791
792 792 def isabsent(self):
793 793 """whether this filectx represents a file not in self._changectx
794 794
795 795 This is mainly for merge code to detect change/delete conflicts. This is
796 796 expected to be True for all subclasses of basectx."""
797 797 return False
798 798
799 799 _customcmp = False
800 800 def cmp(self, fctx):
801 801 """compare with other file context
802 802
803 803 returns True if different than fctx.
804 804 """
805 805 if fctx._customcmp:
806 806 return fctx.cmp(self)
807 807
808 808 if (fctx._filenode is None
809 809 and (self._repo._encodefilterpats
810 810 # if file data starts with '\1\n', empty metadata block is
811 811 # prepended, which adds 4 bytes to filelog.size().
812 812 or self.size() - 4 == fctx.size())
813 813 or self.size() == fctx.size()):
814 814 return self._filelog.cmp(self._filenode, fctx.data())
815 815
816 816 return True
817 817
818 818 def _adjustlinkrev(self, srcrev, inclusive=False):
819 819 """return the first ancestor of <srcrev> introducing <fnode>
820 820
821 821 If the linkrev of the file revision does not point to an ancestor of
822 822 srcrev, we'll walk down the ancestors until we find one introducing
823 823 this file revision.
824 824
825 825 :srcrev: the changeset revision we search ancestors from
826 826 :inclusive: if true, the src revision will also be checked
827 827 """
828 828 repo = self._repo
829 829 cl = repo.unfiltered().changelog
830 830 mfl = repo.manifestlog
831 831 # fetch the linkrev
832 832 lkr = self.linkrev()
833 833 # hack to reuse ancestor computation when searching for renames
834 834 memberanc = getattr(self, '_ancestrycontext', None)
835 835 iteranc = None
836 836 if srcrev is None:
837 837 # wctx case, used by workingfilectx during mergecopy
838 838 revs = [p.rev() for p in self._repo[None].parents()]
839 839 inclusive = True # we skipped the real (revless) source
840 840 else:
841 841 revs = [srcrev]
842 842 if memberanc is None:
843 843 memberanc = iteranc = cl.ancestors(revs, lkr,
844 844 inclusive=inclusive)
845 845 # check if this linkrev is an ancestor of srcrev
846 846 if lkr not in memberanc:
847 847 if iteranc is None:
848 848 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
849 849 fnode = self._filenode
850 850 path = self._path
851 851 for a in iteranc:
852 852 ac = cl.read(a) # get changeset data (we avoid object creation)
853 853 if path in ac[3]: # checking the 'files' field.
854 854 # The file has been touched, check if the content is
855 855 # similar to the one we search for.
856 856 if fnode == mfl[ac[0]].readfast().get(path):
857 857 return a
858 858 # In theory, we should never get out of that loop without a result.
859 859 # But if the manifest uses a buggy file revision (not a child of
860 860 # the one it replaces) we could. Such a buggy situation will likely
861 861 # result in a crash somewhere else at some point.
862 862 return lkr
863 863
864 864 def introrev(self):
865 865 """return the rev of the changeset which introduced this file revision
866 866
867 867 This method is different from linkrev because it takes into account the
868 868 changeset the filectx was created from. It ensures the returned
869 869 revision is one of its ancestors. This prevents bugs from
870 870 'linkrev-shadowing' when a file revision is used by multiple
871 871 changesets.
872 872 """
873 873 lkr = self.linkrev()
874 874 attrs = vars(self)
875 875 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
876 876 if noctx or self.rev() == lkr:
877 877 return self.linkrev()
878 878 return self._adjustlinkrev(self.rev(), inclusive=True)
879 879
880 880 def _parentfilectx(self, path, fileid, filelog):
881 881 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
882 882 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
883 883 if '_changeid' in vars(self) or '_changectx' in vars(self):
884 884 # If self is associated with a changeset (probably explicitly
885 885 # fed), ensure the created filectx is associated with a
886 886 # changeset that is an ancestor of self.changectx.
887 887 # This lets us later use _adjustlinkrev to get a correct link.
888 888 fctx._descendantrev = self.rev()
889 889 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
890 890 elif '_descendantrev' in vars(self):
891 891 # Otherwise propagate _descendantrev if we have one associated.
892 892 fctx._descendantrev = self._descendantrev
893 893 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
894 894 return fctx
895 895
896 896 def parents(self):
897 897 _path = self._path
898 898 fl = self._filelog
899 899 parents = self._filelog.parents(self._filenode)
900 900 pl = [(_path, node, fl) for node in parents if node != nullid]
901 901
902 902 r = fl.renamed(self._filenode)
903 903 if r:
904 904 # - In the simple rename case, both parents are nullid and pl is empty.
905 905 # - In case of merge, only one of the parents is nullid and should
906 906 # be replaced with the rename information. This parent is -always-
907 907 # the first one.
908 908 #
909 909 # As nullid parents have always been filtered out by the previous list
910 910 # comprehension, inserting at 0 will always result in replacing the
911 911 # first nullid parent with the rename information.
912 912 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
913 913
914 914 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
915 915
916 916 def p1(self):
917 917 return self.parents()[0]
918 918
919 919 def p2(self):
920 920 p = self.parents()
921 921 if len(p) == 2:
922 922 return p[1]
923 923 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
924 924
925 925 def annotate(self, follow=False, linenumber=False, diffopts=None):
926 926 '''returns a list of tuples of ((ctx, number), line) for each line
927 927 in the file, where ctx is the filectx of the node where
928 928 that line was last changed; if linenumber parameter is true, number is
929 929 the line number at the first appearance in the managed file, otherwise,
930 930 number has a fixed value of False.
931 931 '''
932 932
933 933 def lines(text):
934 934 if text.endswith("\n"):
935 935 return text.count("\n")
936 936 return text.count("\n") + int(bool(text))
937 937
938 938 if linenumber:
939 939 def decorate(text, rev):
940 940 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
941 941 else:
942 942 def decorate(text, rev):
943 943 return ([(rev, False)] * lines(text), text)
944 944
945 945 def pair(parent, child):
946 946 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
947 947 for (a1, a2, b1, b2), t in blocks:
948 948 # Changed blocks ('!') or blocks made only of blank lines ('~')
949 949 # belong to the child.
950 950 if t == '=':
951 951 child[0][b1:b2] = parent[0][a1:a2]
952 952 return child
953 953
954 954 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
955 955
956 956 def parents(f):
957 957 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
958 958 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
959 959 # from the topmost introrev (= srcrev) down to p.linkrev() if it
960 960 # isn't an ancestor of the srcrev.
961 961 f._changeid
962 962 pl = f.parents()
963 963
964 964 # Don't return renamed parents if we aren't following.
965 965 if not follow:
966 966 pl = [p for p in pl if p.path() == f.path()]
967 967
968 968 # renamed filectx won't have a filelog yet, so set it
969 969 # from the cache to save time
970 970 for p in pl:
971 971 if not '_filelog' in p.__dict__:
972 972 p._filelog = getlog(p.path())
973 973
974 974 return pl
975 975
976 976 # use linkrev to find the first changeset where self appeared
977 977 base = self
978 978 introrev = self.introrev()
979 979 if self.rev() != introrev:
980 980 base = self.filectx(self.filenode(), changeid=introrev)
981 981 if getattr(base, '_ancestrycontext', None) is None:
982 982 cl = self._repo.changelog
983 983 if introrev is None:
984 984 # wctx is not inclusive, but works because _ancestrycontext
985 985 # is used to test filelog revisions
986 986 ac = cl.ancestors([p.rev() for p in base.parents()],
987 987 inclusive=True)
988 988 else:
989 989 ac = cl.ancestors([introrev], inclusive=True)
990 990 base._ancestrycontext = ac
991 991
992 992 # This algorithm would prefer to be recursive, but Python is a
993 993 # bit recursion-hostile. Instead we do an iterative
994 994 # depth-first search.
995 995
996 996 # 1st DFS pre-calculates pcache and needed
997 997 visit = [base]
998 998 pcache = {}
999 999 needed = {base: 1}
1000 1000 while visit:
1001 1001 f = visit.pop()
1002 1002 if f in pcache:
1003 1003 continue
1004 1004 pl = parents(f)
1005 1005 pcache[f] = pl
1006 1006 for p in pl:
1007 1007 needed[p] = needed.get(p, 0) + 1
1008 1008 if p not in pcache:
1009 1009 visit.append(p)
1010 1010
1011 1011 # 2nd DFS does the actual annotate
1012 1012 visit[:] = [base]
1013 1013 hist = {}
1014 1014 while visit:
1015 1015 f = visit[-1]
1016 1016 if f in hist:
1017 1017 visit.pop()
1018 1018 continue
1019 1019
1020 1020 ready = True
1021 1021 pl = pcache[f]
1022 1022 for p in pl:
1023 1023 if p not in hist:
1024 1024 ready = False
1025 1025 visit.append(p)
1026 1026 if ready:
1027 1027 visit.pop()
1028 1028 curr = decorate(f.data(), f)
1029 1029 for p in pl:
1030 1030 curr = pair(hist[p], curr)
1031 1031 if needed[p] == 1:
1032 1032 del hist[p]
1033 1033 del needed[p]
1034 1034 else:
1035 1035 needed[p] -= 1
1036 1036
1037 1037 hist[f] = curr
1038 1038 del pcache[f]
1039 1039
1040 1040 return zip(hist[base][0], hist[base][1].splitlines(True))
1041 1041
1042 1042 def ancestors(self, followfirst=False):
1043 1043 visit = {}
1044 1044 c = self
1045 1045 if followfirst:
1046 1046 cut = 1
1047 1047 else:
1048 1048 cut = None
1049 1049
1050 1050 while True:
1051 1051 for parent in c.parents()[:cut]:
1052 1052 visit[(parent.linkrev(), parent.filenode())] = parent
1053 1053 if not visit:
1054 1054 break
1055 1055 c = visit.pop(max(visit))
1056 1056 yield c
1057 1057
1058 1058 class filectx(basefilectx):
1059 1059 """A filecontext object makes access to data related to a particular
1060 1060 filerevision convenient."""
1061 1061 def __init__(self, repo, path, changeid=None, fileid=None,
1062 1062 filelog=None, changectx=None):
1063 1063 """changeid can be a changeset revision, node, or tag.
1064 1064 fileid can be a file revision or node."""
1065 1065 self._repo = repo
1066 1066 self._path = path
1067 1067
1068 1068 assert (changeid is not None
1069 1069 or fileid is not None
1070 1070 or changectx is not None), \
1071 1071 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1072 1072 % (changeid, fileid, changectx))
1073 1073
1074 1074 if filelog is not None:
1075 1075 self._filelog = filelog
1076 1076
1077 1077 if changeid is not None:
1078 1078 self._changeid = changeid
1079 1079 if changectx is not None:
1080 1080 self._changectx = changectx
1081 1081 if fileid is not None:
1082 1082 self._fileid = fileid
1083 1083
1084 1084 @propertycache
1085 1085 def _changectx(self):
1086 1086 try:
1087 1087 return changectx(self._repo, self._changeid)
1088 1088 except error.FilteredRepoLookupError:
1089 1089 # Linkrev may point to any revision in the repository. When the
1090 1090 # repository is filtered this may lead to `filectx` trying to build
1091 1091 # `changectx` for a filtered revision. In such a case we fall back to
1092 1092 # creating `changectx` on the unfiltered version of the repository.
1093 1093 # This fallback should not be an issue because `changectx` from
1094 1094 # `filectx` are not used in complex operations that care about
1095 1095 # filtering.
1096 1096 #
1097 1097 # This fallback is a cheap and dirty fix that prevents several
1098 1098 # crashes. It does not ensure the behavior is correct. However the
1099 1099 # behavior was not correct before filtering either and "incorrect
1100 1100 # behavior" is seen as better than a crash.
1101 1101 #
1102 1102 # Linkrevs have several serious troubles with filtering that are
1103 1103 # complicated to solve. Proper handling of the issue here should be
1104 1104 # considered when solving the linkrev issues is on the table.
1105 1105 return changectx(self._repo.unfiltered(), self._changeid)
1106 1106
1107 1107 def filectx(self, fileid, changeid=None):
1108 1108 '''opens an arbitrary revision of the file without
1109 1109 opening a new filelog'''
1110 1110 return filectx(self._repo, self._path, fileid=fileid,
1111 1111 filelog=self._filelog, changeid=changeid)
1112 1112
1113 1113 def data(self):
1114 1114 try:
1115 1115 return self._filelog.read(self._filenode)
1116 1116 except error.CensoredNodeError:
1117 1117 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1118 1118 return ""
1119 1119 raise error.Abort(_("censored node: %s") % short(self._filenode),
1120 1120 hint=_("set censor.policy to ignore errors"))
1121 1121
1122 1122 def size(self):
1123 1123 return self._filelog.size(self._filerev)
1124 1124
1125 1125 def renamed(self):
1126 1126 """check if file was actually renamed in this changeset revision
1127 1127
1128 1128 If a rename is logged in the file revision, we report the copy for the
1129 1129 changeset only if the file revision's linkrev points back to the changeset
1130 1130 in question or both changeset parents contain different file revisions.
1131 1131 """
1132 1132
1133 1133 renamed = self._filelog.renamed(self._filenode)
1134 1134 if not renamed:
1135 1135 return renamed
1136 1136
1137 1137 if self.rev() == self.linkrev():
1138 1138 return renamed
1139 1139
1140 1140 name = self.path()
1141 1141 fnode = self._filenode
1142 1142 for p in self._changectx.parents():
1143 1143 try:
1144 1144 if fnode == p.filenode(name):
1145 1145 return None
1146 1146 except error.LookupError:
1147 1147 pass
1148 1148 return renamed
1149 1149
1150 1150 def children(self):
1151 1151 # hard for renames
1152 1152 c = self._filelog.children(self._filenode)
1153 1153 return [filectx(self._repo, self._path, fileid=x,
1154 1154 filelog=self._filelog) for x in c]
1155 1155
1156 def blockancestors(fctx, fromline, toline):
1157 """Yield ancestors of `fctx` with respect to the block of lines within
1158 `fromline`-`toline` range.
1159 """
1160 def changesrange(fctx1, fctx2, linerange2):
1161 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1162 if diff from fctx2 to fctx1 has changes in linerange2 and
1163 `linerange1` is the new line range for fctx1.
1164 """
1165 diffopts = patch.diffopts(fctx._repo.ui)
1166 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1167 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1168 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1169 return diffinrange, linerange1
1170
1171 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1172 while visit:
1173 c, linerange2 = visit.pop(max(visit))
1174 pl = c.parents()
1175 if not pl:
1176 # The block originates from the initial revision.
1177 yield c
1178 continue
1179 inrange = False
1180 for p in pl:
1181 inrangep, linerange1 = changesrange(p, c, linerange2)
1182 inrange = inrange or inrangep
1183 if linerange1[0] == linerange1[1]:
1184 # Parent's linerange is empty, meaning that the block got
1185 # introduced in this revision; no need to go further in this
1186 # branch.
1187 continue
1188 visit[p.linkrev(), p.filenode()] = p, linerange1
1189 if inrange:
1190 yield c
1191
1156 1192 class committablectx(basectx):
1157 1193 """A committablectx object provides common functionality for a context that
1158 1194 wants the ability to commit, e.g. workingctx or memctx."""
1159 1195 def __init__(self, repo, text="", user=None, date=None, extra=None,
1160 1196 changes=None):
1161 1197 self._repo = repo
1162 1198 self._rev = None
1163 1199 self._node = None
1164 1200 self._text = text
1165 1201 if date:
1166 1202 self._date = util.parsedate(date)
1167 1203 if user:
1168 1204 self._user = user
1169 1205 if changes:
1170 1206 self._status = changes
1171 1207
1172 1208 self._extra = {}
1173 1209 if extra:
1174 1210 self._extra = extra.copy()
1175 1211 if 'branch' not in self._extra:
1176 1212 try:
1177 1213 branch = encoding.fromlocal(self._repo.dirstate.branch())
1178 1214 except UnicodeDecodeError:
1179 1215 raise error.Abort(_('branch name not in UTF-8!'))
1180 1216 self._extra['branch'] = branch
1181 1217 if self._extra['branch'] == '':
1182 1218 self._extra['branch'] = 'default'
1183 1219
1184 1220 def __str__(self):
1185 1221 return str(self._parents[0]) + "+"
1186 1222
1187 1223 def __nonzero__(self):
1188 1224 return True
1189 1225
1190 1226 def _buildflagfunc(self):
1191 1227 # Create a fallback function for getting file flags when the
1192 1228 # filesystem doesn't support them
1193 1229
1194 1230 copiesget = self._repo.dirstate.copies().get
1195 1231 parents = self.parents()
1196 1232 if len(parents) < 2:
1197 1233 # when we have one parent, it's easy: copy from parent
1198 1234 man = parents[0].manifest()
1199 1235 def func(f):
1200 1236 f = copiesget(f, f)
1201 1237 return man.flags(f)
1202 1238 else:
1203 1239 # merges are tricky: we try to reconstruct the unstored
1204 1240 # result from the merge (issue1802)
1205 1241 p1, p2 = parents
1206 1242 pa = p1.ancestor(p2)
1207 1243 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1208 1244
1209 1245 def func(f):
1210 1246 f = copiesget(f, f) # may be wrong for merges with copies
1211 1247 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1212 1248 if fl1 == fl2:
1213 1249 return fl1
1214 1250 if fl1 == fla:
1215 1251 return fl2
1216 1252 if fl2 == fla:
1217 1253 return fl1
1218 1254 return '' # punt for conflicts
1219 1255
1220 1256 return func
1221 1257
1222 1258 @propertycache
1223 1259 def _flagfunc(self):
1224 1260 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1225 1261
1226 1262 @propertycache
1227 1263 def _manifest(self):
1228 1264 """generate a manifest corresponding to the values in self._status
1229 1265
1230 1266 This reuses the file nodeid from the parent, but we append an extra letter
1231 1267 when modified. Modified files get an extra 'm' while added files get
1232 1268 an extra 'a'. This is used by manifest merge to see that files
1233 1269 are different and by update logic to avoid deleting newly added files.
1234 1270 """
1235 1271 parents = self.parents()
1236 1272
1237 1273 man = parents[0].manifest().copy()
1238 1274
1239 1275 ff = self._flagfunc
1240 1276 for i, l in ((addednodeid, self._status.added),
1241 1277 (modifiednodeid, self._status.modified)):
1242 1278 for f in l:
1243 1279 man[f] = i
1244 1280 try:
1245 1281 man.setflag(f, ff(f))
1246 1282 except OSError:
1247 1283 pass
1248 1284
1249 1285 for f in self._status.deleted + self._status.removed:
1250 1286 if f in man:
1251 1287 del man[f]
1252 1288
1253 1289 return man
1254 1290
1255 1291 @propertycache
1256 1292 def _status(self):
1257 1293 return self._repo.status()
1258 1294
1259 1295 @propertycache
1260 1296 def _user(self):
1261 1297 return self._repo.ui.username()
1262 1298
1263 1299 @propertycache
1264 1300 def _date(self):
1265 1301 return util.makedate()
1266 1302
1267 1303 def subrev(self, subpath):
1268 1304 return None
1269 1305
1270 1306 def manifestnode(self):
1271 1307 return None
1272 1308 def user(self):
1273 1309 return self._user or self._repo.ui.username()
1274 1310 def date(self):
1275 1311 return self._date
1276 1312 def description(self):
1277 1313 return self._text
1278 1314 def files(self):
1279 1315 return sorted(self._status.modified + self._status.added +
1280 1316 self._status.removed)
1281 1317
1282 1318 def modified(self):
1283 1319 return self._status.modified
1284 1320 def added(self):
1285 1321 return self._status.added
1286 1322 def removed(self):
1287 1323 return self._status.removed
1288 1324 def deleted(self):
1289 1325 return self._status.deleted
1290 1326 def branch(self):
1291 1327 return encoding.tolocal(self._extra['branch'])
1292 1328 def closesbranch(self):
1293 1329 return 'close' in self._extra
1294 1330 def extra(self):
1295 1331 return self._extra
1296 1332
1297 1333 def tags(self):
1298 1334 return []
1299 1335
1300 1336 def bookmarks(self):
1301 1337 b = []
1302 1338 for p in self.parents():
1303 1339 b.extend(p.bookmarks())
1304 1340 return b
1305 1341
1306 1342 def phase(self):
1307 1343 phase = phases.draft # default phase to draft
1308 1344 for p in self.parents():
1309 1345 phase = max(phase, p.phase())
1310 1346 return phase
1311 1347
1312 1348 def hidden(self):
1313 1349 return False
1314 1350
1315 1351 def children(self):
1316 1352 return []
1317 1353
1318 1354 def flags(self, path):
1319 1355 if '_manifest' in self.__dict__:
1320 1356 try:
1321 1357 return self._manifest.flags(path)
1322 1358 except KeyError:
1323 1359 return ''
1324 1360
1325 1361 try:
1326 1362 return self._flagfunc(path)
1327 1363 except OSError:
1328 1364 return ''
1329 1365
1330 1366 def ancestor(self, c2):
1331 1367 """return the "best" ancestor context of self and c2"""
1332 1368 return self._parents[0].ancestor(c2) # punt on two parents for now
1333 1369
1334 1370 def walk(self, match):
1335 1371 '''Generates matching file names.'''
1336 1372 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1337 1373 True, False))
1338 1374
1339 1375 def matches(self, match):
1340 1376 return sorted(self._repo.dirstate.matches(match))
1341 1377
1342 1378 def ancestors(self):
1343 1379 for p in self._parents:
1344 1380 yield p
1345 1381 for a in self._repo.changelog.ancestors(
1346 1382 [p.rev() for p in self._parents]):
1347 1383 yield changectx(self._repo, a)
1348 1384
1349 1385 def markcommitted(self, node):
1350 1386 """Perform post-commit cleanup necessary after committing this ctx
1351 1387
1352 1388 Specifically, this updates backing stores this working context
1353 1389 wraps to reflect the fact that the changes reflected by this
1354 1390 workingctx have been committed. For example, it marks
1355 1391 modified and added files as normal in the dirstate.
1356 1392
1357 1393 """
1358 1394
1359 1395 self._repo.dirstate.beginparentchange()
1360 1396 for f in self.modified() + self.added():
1361 1397 self._repo.dirstate.normal(f)
1362 1398 for f in self.removed():
1363 1399 self._repo.dirstate.drop(f)
1364 1400 self._repo.dirstate.setparents(node)
1365 1401 self._repo.dirstate.endparentchange()
1366 1402
1367 1403 # write changes out explicitly, because nesting wlock at
1368 1404 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1369 1405 # from immediately doing so for subsequent changing files
1370 1406 self._repo.dirstate.write(self._repo.currenttransaction())
1371 1407
1372 1408 class workingctx(committablectx):
1373 1409 """A workingctx object makes access to data related to
1374 1410 the current working directory convenient.
1375 1411 date - any valid date string or (unixtime, offset), or None.
1376 1412 user - username string, or None.
1377 1413 extra - a dictionary of extra values, or None.
1378 1414 changes - a list of file lists as returned by localrepo.status()
1379 1415 or None to use the repository status.
1380 1416 """
1381 1417 def __init__(self, repo, text="", user=None, date=None, extra=None,
1382 1418 changes=None):
1383 1419 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1384 1420
1385 1421 def __iter__(self):
1386 1422 d = self._repo.dirstate
1387 1423 for f in d:
1388 1424 if d[f] != 'r':
1389 1425 yield f
1390 1426
1391 1427 def __contains__(self, key):
1392 1428 return self._repo.dirstate[key] not in "?r"
1393 1429
1394 1430 def hex(self):
1395 1431 return hex(wdirid)
1396 1432
1397 1433 @propertycache
1398 1434 def _parents(self):
1399 1435 p = self._repo.dirstate.parents()
1400 1436 if p[1] == nullid:
1401 1437 p = p[:-1]
1402 1438 return [changectx(self._repo, x) for x in p]
1403 1439
1404 1440 def filectx(self, path, filelog=None):
1405 1441 """get a file context from the working directory"""
1406 1442 return workingfilectx(self._repo, path, workingctx=self,
1407 1443 filelog=filelog)
1408 1444
1409 1445 def dirty(self, missing=False, merge=True, branch=True):
1410 1446 "check whether a working directory is modified"
1411 1447 # check subrepos first
1412 1448 for s in sorted(self.substate):
1413 1449 if self.sub(s).dirty():
1414 1450 return True
1415 1451 # check current working dir
1416 1452 return ((merge and self.p2()) or
1417 1453 (branch and self.branch() != self.p1().branch()) or
1418 1454 self.modified() or self.added() or self.removed() or
1419 1455 (missing and self.deleted()))
1420 1456
1421 1457 def add(self, list, prefix=""):
1422 1458 join = lambda f: os.path.join(prefix, f)
1423 1459 with self._repo.wlock():
1424 1460 ui, ds = self._repo.ui, self._repo.dirstate
1425 1461 rejected = []
1426 1462 lstat = self._repo.wvfs.lstat
1427 1463 for f in list:
1428 1464 scmutil.checkportable(ui, join(f))
1429 1465 try:
1430 1466 st = lstat(f)
1431 1467 except OSError:
1432 1468 ui.warn(_("%s does not exist!\n") % join(f))
1433 1469 rejected.append(f)
1434 1470 continue
1435 1471 if st.st_size > 10000000:
1436 1472 ui.warn(_("%s: up to %d MB of RAM may be required "
1437 1473 "to manage this file\n"
1438 1474 "(use 'hg revert %s' to cancel the "
1439 1475 "pending addition)\n")
1440 1476 % (f, 3 * st.st_size // 1000000, join(f)))
1441 1477 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1442 1478 ui.warn(_("%s not added: only files and symlinks "
1443 1479 "supported currently\n") % join(f))
1444 1480 rejected.append(f)
1445 1481 elif ds[f] in 'amn':
1446 1482 ui.warn(_("%s already tracked!\n") % join(f))
1447 1483 elif ds[f] == 'r':
1448 1484 ds.normallookup(f)
1449 1485 else:
1450 1486 ds.add(f)
1451 1487 return rejected
1452 1488
1453 1489 def forget(self, files, prefix=""):
1454 1490 join = lambda f: os.path.join(prefix, f)
1455 1491 with self._repo.wlock():
1456 1492 rejected = []
1457 1493 for f in files:
1458 1494 if f not in self._repo.dirstate:
1459 1495 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1460 1496 rejected.append(f)
1461 1497 elif self._repo.dirstate[f] != 'a':
1462 1498 self._repo.dirstate.remove(f)
1463 1499 else:
1464 1500 self._repo.dirstate.drop(f)
1465 1501 return rejected
1466 1502
1467 1503 def undelete(self, list):
1468 1504 pctxs = self.parents()
1469 1505 with self._repo.wlock():
1470 1506 for f in list:
1471 1507 if self._repo.dirstate[f] != 'r':
1472 1508 self._repo.ui.warn(_("%s not removed!\n") % f)
1473 1509 else:
1474 1510 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1475 1511 t = fctx.data()
1476 1512 self._repo.wwrite(f, t, fctx.flags())
1477 1513 self._repo.dirstate.normal(f)
1478 1514
1479 1515 def copy(self, source, dest):
1480 1516 try:
1481 1517 st = self._repo.wvfs.lstat(dest)
1482 1518 except OSError as err:
1483 1519 if err.errno != errno.ENOENT:
1484 1520 raise
1485 1521 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1486 1522 return
1487 1523 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1488 1524 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1489 1525 "symbolic link\n") % dest)
1490 1526 else:
1491 1527 with self._repo.wlock():
1492 1528 if self._repo.dirstate[dest] in '?':
1493 1529 self._repo.dirstate.add(dest)
1494 1530 elif self._repo.dirstate[dest] in 'r':
1495 1531 self._repo.dirstate.normallookup(dest)
1496 1532 self._repo.dirstate.copy(source, dest)
1497 1533
1498 1534 def match(self, pats=[], include=None, exclude=None, default='glob',
1499 1535 listsubrepos=False, badfn=None):
1500 1536 r = self._repo
1501 1537
1502 1538 # Only a case insensitive filesystem needs magic to translate user input
1503 1539 # to actual case in the filesystem.
1504 1540 if not util.fscasesensitive(r.root):
1505 1541 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1506 1542 exclude, default, r.auditor, self,
1507 1543 listsubrepos=listsubrepos,
1508 1544 badfn=badfn)
1509 1545 return matchmod.match(r.root, r.getcwd(), pats,
1510 1546 include, exclude, default,
1511 1547 auditor=r.auditor, ctx=self,
1512 1548 listsubrepos=listsubrepos, badfn=badfn)
1513 1549
1514 1550 def _filtersuspectsymlink(self, files):
1515 1551 if not files or self._repo.dirstate._checklink:
1516 1552 return files
1517 1553
1518 1554 # Symlink placeholders may get non-symlink-like contents
1519 1555 # via user error or dereferencing by NFS or Samba servers,
1520 1556 # so we filter out any placeholders that don't look like a
1521 1557 # symlink
1522 1558 sane = []
1523 1559 for f in files:
1524 1560 if self.flags(f) == 'l':
1525 1561 d = self[f].data()
1526 1562 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1527 1563 self._repo.ui.debug('ignoring suspect symlink placeholder'
1528 1564 ' "%s"\n' % f)
1529 1565 continue
1530 1566 sane.append(f)
1531 1567 return sane
1532 1568
1533 1569 def _checklookup(self, files):
1534 1570 # check for any possibly clean files
1535 1571 if not files:
1536 1572 return [], []
1537 1573
1538 1574 modified = []
1539 1575 fixup = []
1540 1576 pctx = self._parents[0]
1541 1577 # do a full compare of any files that might have changed
1542 1578 for f in sorted(files):
1543 1579 if (f not in pctx or self.flags(f) != pctx.flags(f)
1544 1580 or pctx[f].cmp(self[f])):
1545 1581 modified.append(f)
1546 1582 else:
1547 1583 fixup.append(f)
1548 1584
1549 1585 # update dirstate for files that are actually clean
1550 1586 if fixup:
1551 1587 try:
1552 1588 # updating the dirstate is optional
1553 1589 # so we don't wait on the lock
1554 1590 # wlock can invalidate the dirstate, so cache normal _after_
1555 1591 # taking the lock
1556 1592 with self._repo.wlock(False):
1557 1593 normal = self._repo.dirstate.normal
1558 1594 for f in fixup:
1559 1595 normal(f)
1560 1596 # write changes out explicitly, because nesting
1561 1597 # wlock at runtime may prevent 'wlock.release()'
1562 1598 # after this block from doing so for subsequent
1563 1599 # changing files
1564 1600 self._repo.dirstate.write(self._repo.currenttransaction())
1565 1601 except error.LockError:
1566 1602 pass
1567 1603 return modified, fixup
1568 1604
1569 1605 def _manifestmatches(self, match, s):
1570 1606 """Slow path for workingctx
1571 1607
1572 1608 The fast path is when we compare the working directory to its parent
1573 1609 which means this function is comparing with a non-parent; therefore we
1574 1610 need to build a manifest and return what matches.
1575 1611 """
1576 1612 mf = self._repo['.']._manifestmatches(match, s)
1577 1613 for f in s.modified + s.added:
1578 1614 mf[f] = newnodeid
1579 1615 mf.setflag(f, self.flags(f))
1580 1616 for f in s.removed:
1581 1617 if f in mf:
1582 1618 del mf[f]
1583 1619 return mf
1584 1620
1585 1621 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1586 1622 unknown=False):
1587 1623 '''Gets the status from the dirstate -- internal use only.'''
1588 1624 listignored, listclean, listunknown = ignored, clean, unknown
1589 1625 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1590 1626 subrepos = []
1591 1627 if '.hgsub' in self:
1592 1628 subrepos = sorted(self.substate)
1593 1629 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1594 1630 listclean, listunknown)
1595 1631
1596 1632 # check for any possibly clean files
1597 1633 if cmp:
1598 1634 modified2, fixup = self._checklookup(cmp)
1599 1635 s.modified.extend(modified2)
1600 1636
1601 1637 # update dirstate for files that are actually clean
1602 1638 if fixup and listclean:
1603 1639 s.clean.extend(fixup)
1604 1640
1605 1641 if match.always():
1606 1642 # cache for performance
1607 1643 if s.unknown or s.ignored or s.clean:
1608 1644 # "_status" is cached with list*=False in the normal route
1609 1645 self._status = scmutil.status(s.modified, s.added, s.removed,
1610 1646 s.deleted, [], [], [])
1611 1647 else:
1612 1648 self._status = s
1613 1649
1614 1650 return s
1615 1651
1616 1652 def _buildstatus(self, other, s, match, listignored, listclean,
1617 1653 listunknown):
1618 1654 """build a status with respect to another context
1619 1655
1620 1656 This includes logic for maintaining the fast path of status when
1621 1657 comparing the working directory against its parent: a new manifest is
1622 1658 only built when self (the working directory) is compared against
1623 1659 something other than its parent (repo['.']).
1624 1660 """
1625 1661 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1626 1662 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1627 1663 # might have accidentally ended up with the entire contents of the file
1628 1664 # they are supposed to be linking to.
1629 1665 s.modified[:] = self._filtersuspectsymlink(s.modified)
1630 1666 if other != self._repo['.']:
1631 1667 s = super(workingctx, self)._buildstatus(other, s, match,
1632 1668 listignored, listclean,
1633 1669 listunknown)
1634 1670 return s
1635 1671
1636 1672 def _matchstatus(self, other, match):
1637 1673 """override the match method with a filter for directory patterns
1638 1674
1639 1675 We use inheritance to customize the match.bad method only in the case
1640 1676 of workingctx, since this behavior belongs only to the working
1641 1677 directory when comparing against the parent changeset.
1642 1678
1643 1679 If we aren't comparing against the working directory's parent, then we
1644 1680 just use the default match object sent to us.
1645 1681 """
1646 1682 superself = super(workingctx, self)
1647 1683 match = superself._matchstatus(other, match)
1648 1684 if other != self._repo['.']:
1649 1685 def bad(f, msg):
1650 1686 # 'f' may be a directory pattern from 'match.files()',
1651 1687 # so 'f not in ctx1' is not enough
1652 1688 if f not in other and not other.hasdir(f):
1653 1689 self._repo.ui.warn('%s: %s\n' %
1654 1690 (self._repo.dirstate.pathto(f), msg))
1655 1691 match.bad = bad
1656 1692 return match
1657 1693
1658 1694 class committablefilectx(basefilectx):
1659 1695 """A committablefilectx provides common functionality for a file context
1660 1696 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1661 1697 def __init__(self, repo, path, filelog=None, ctx=None):
1662 1698 self._repo = repo
1663 1699 self._path = path
1664 1700 self._changeid = None
1665 1701 self._filerev = self._filenode = None
1666 1702
1667 1703 if filelog is not None:
1668 1704 self._filelog = filelog
1669 1705 if ctx:
1670 1706 self._changectx = ctx
1671 1707
1672 1708 def __nonzero__(self):
1673 1709 return True
1674 1710
1675 1711 def linkrev(self):
1676 1712 # linked to self._changectx no matter if file is modified or not
1677 1713 return self.rev()
1678 1714
1679 1715 def parents(self):
1680 1716 '''return parent filectxs, following copies if necessary'''
1681 1717 def filenode(ctx, path):
1682 1718 return ctx._manifest.get(path, nullid)
1683 1719
1684 1720 path = self._path
1685 1721 fl = self._filelog
1686 1722 pcl = self._changectx._parents
1687 1723 renamed = self.renamed()
1688 1724
1689 1725 if renamed:
1690 1726 pl = [renamed + (None,)]
1691 1727 else:
1692 1728 pl = [(path, filenode(pcl[0], path), fl)]
1693 1729
1694 1730 for pc in pcl[1:]:
1695 1731 pl.append((path, filenode(pc, path), fl))
1696 1732
1697 1733 return [self._parentfilectx(p, fileid=n, filelog=l)
1698 1734 for p, n, l in pl if n != nullid]
1699 1735
1700 1736 def children(self):
1701 1737 return []
1702 1738
1703 1739 class workingfilectx(committablefilectx):
1704 1740 """A workingfilectx object makes access to data related to a particular
1705 1741 file in the working directory convenient."""
1706 1742 def __init__(self, repo, path, filelog=None, workingctx=None):
1707 1743 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1708 1744
1709 1745 @propertycache
1710 1746 def _changectx(self):
1711 1747 return workingctx(self._repo)
1712 1748
1713 1749 def data(self):
1714 1750 return self._repo.wread(self._path)
1715 1751 def renamed(self):
1716 1752 rp = self._repo.dirstate.copied(self._path)
1717 1753 if not rp:
1718 1754 return None
1719 1755 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1720 1756
1721 1757 def size(self):
1722 1758 return self._repo.wvfs.lstat(self._path).st_size
1723 1759 def date(self):
1724 1760 t, tz = self._changectx.date()
1725 1761 try:
1726 1762 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1727 1763 except OSError as err:
1728 1764 if err.errno != errno.ENOENT:
1729 1765 raise
1730 1766 return (t, tz)
1731 1767
1732 1768 def cmp(self, fctx):
1733 1769 """compare with other file context
1734 1770
1735 1771 returns True if different than fctx.
1736 1772 """
1737 1773 # fctx should be a filectx (not a workingfilectx)
1738 1774 # invert comparison to reuse the same code path
1739 1775 return fctx.cmp(self)
1740 1776
1741 1777 def remove(self, ignoremissing=False):
1742 1778 """wraps unlink for a repo's working directory"""
1743 1779 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1744 1780
1745 1781 def write(self, data, flags):
1746 1782 """wraps repo.wwrite"""
1747 1783 self._repo.wwrite(self._path, data, flags)
1748 1784
1749 1785 class workingcommitctx(workingctx):
1750 1786 """A workingcommitctx object makes access to data related to
1751 1787 the revision being committed convenient.
1752 1788
1753 1789 This hides changes in the working directory, if they aren't
1754 1790 committed in this context.
1755 1791 """
1756 1792 def __init__(self, repo, changes,
1757 1793 text="", user=None, date=None, extra=None):
1758 1794 super(workingctx, self).__init__(repo, text, user, date, extra,
1759 1795 changes)
1760 1796
1761 1797 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1762 1798 unknown=False):
1763 1799 """Return matched files only in ``self._status``
1764 1800
1765 1801 Uncommitted files appear "clean" via this context, even if
1766 1802 they aren't actually so in the working directory.
1767 1803 """
1768 1804 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1769 1805 if clean:
1770 1806 clean = [f for f in self._manifest if f not in self._changedset]
1771 1807 else:
1772 1808 clean = []
1773 1809 return scmutil.status([f for f in self._status.modified if match(f)],
1774 1810 [f for f in self._status.added if match(f)],
1775 1811 [f for f in self._status.removed if match(f)],
1776 1812 [], [], [], clean)
1777 1813
1778 1814 @propertycache
1779 1815 def _changedset(self):
1780 1816 """Return the set of files changed in this context
1781 1817 """
1782 1818 changed = set(self._status.modified)
1783 1819 changed.update(self._status.added)
1784 1820 changed.update(self._status.removed)
1785 1821 return changed
1786 1822
1787 1823 def makecachingfilectxfn(func):
1788 1824 """Create a filectxfn that caches based on the path.
1789 1825
1790 1826 We can't use util.cachefunc because it uses all arguments as the cache
1791 1827 key and this creates a cycle since the arguments include the repo and
1792 1828 memctx.
1793 1829 """
1794 1830 cache = {}
1795 1831
1796 1832 def getfilectx(repo, memctx, path):
1797 1833 if path not in cache:
1798 1834 cache[path] = func(repo, memctx, path)
1799 1835 return cache[path]
1800 1836
1801 1837 return getfilectx
1802 1838
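# Illustrative use of makecachingfilectxfn (the callable and helper names
# below are hypothetical): wrap a filectxfn so each path is resolved at most
# once per commit, without putting repo/memctx into the cache key.
#
#   def slowfilectxfn(repo, memctx, path):
#       return memfilectx(repo, path, expensive_lookup(path), memctx=memctx)
#
#   cachedfn = makecachingfilectxfn(slowfilectxfn)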
1803 1839 class memctx(committablectx):
1804 1840 """Use memctx to perform in-memory commits via localrepo.commitctx().
1805 1841
1806 1842 Revision information is supplied at initialization time, while
1807 1843 related files data is made available through a callback
1808 1844 mechanism. 'repo' is the current localrepo, 'parents' is a
1809 1845 sequence of two parent revision identifiers (pass None for every
1810 1846 missing parent), 'text' is the commit message and 'files' lists
1811 1847 names of files touched by the revision (normalized and relative to
1812 1848 repository root).
1813 1849
1814 1850 filectxfn(repo, memctx, path) is a callable receiving the
1815 1851 repository, the current memctx object and the normalized path of
1816 1852 requested file, relative to repository root. It is fired by the
1817 1853 commit function for every file in 'files', but calls order is
1818 1854 undefined. If the file is available in the revision being
1819 1855 committed (updated or added), filectxfn returns a memfilectx
1820 1856 object. If the file was removed, filectxfn raises an
1821 1857 IOError. Moved files are represented by marking the source file
1822 1858 removed and the new file added with copy information (see
1823 1859 memfilectx).
1824 1860
1825 1861 user receives the committer name and defaults to current
1826 1862 repository username, date is the commit date in any format
1827 1863 supported by util.parsedate() and defaults to current date, extra
1828 1864 is a dictionary of metadata or is left empty.
1829 1865 """
1830 1866
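# Illustrative sketch (file name, data and user below are hypothetical):
# committing a single new file in memory via localrepo.commitctx(), along the
# lines described in the docstring above.
#
#   def filectxfn(repo, memctx, path):
#       if path == 'hello.txt':
#           return memfilectx(repo, path, 'hello world\n', memctx=memctx)
#       raise IOError  # any other requested file is treated as removed
#
#   mctx = memctx(repo, [repo['.'].node(), None], 'add hello.txt',
#                 ['hello.txt'], filectxfn, user='alice <alice@example.org>')
#   newnode = repo.commitctx(mctx)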
1831 1867 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1832 1868 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1833 1869 # this field to determine what to do in filectxfn.
1834 1870 _returnnoneformissingfiles = True
1835 1871
1836 1872 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1837 1873 date=None, extra=None, editor=False):
1838 1874 super(memctx, self).__init__(repo, text, user, date, extra)
1839 1875 self._rev = None
1840 1876 self._node = None
1841 1877 parents = [(p or nullid) for p in parents]
1842 1878 p1, p2 = parents
1843 1879 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1844 1880 files = sorted(set(files))
1845 1881 self._files = files
1846 1882 self.substate = {}
1847 1883
1848 1884 # if store is not callable, wrap it in a function
1849 1885 if not callable(filectxfn):
1850 1886 def getfilectx(repo, memctx, path):
1851 1887 fctx = filectxfn[path]
1852 1888 # this is weird but apparently we only keep track of one parent
1853 1889 # (why not only store that instead of a tuple?)
1854 1890 copied = fctx.renamed()
1855 1891 if copied:
1856 1892 copied = copied[0]
1857 1893 return memfilectx(repo, path, fctx.data(),
1858 1894 islink=fctx.islink(), isexec=fctx.isexec(),
1859 1895 copied=copied, memctx=memctx)
1860 1896 self._filectxfn = getfilectx
1861 1897 else:
1862 1898 # memoizing increases performance for e.g. vcs convert scenarios.
1863 1899 self._filectxfn = makecachingfilectxfn(filectxfn)
1864 1900
1865 1901 if extra:
1866 1902 self._extra = extra.copy()
1867 1903 else:
1868 1904 self._extra = {}
1869 1905
1870 1906 if self._extra.get('branch', '') == '':
1871 1907 self._extra['branch'] = 'default'
1872 1908
1873 1909 if editor:
1874 1910 self._text = editor(self._repo, self, [])
1875 1911 self._repo.savecommitmessage(self._text)
1876 1912
1877 1913 def filectx(self, path, filelog=None):
1878 1914 """get a file context from the working directory
1879 1915
1880 1916 Returns None if file doesn't exist and should be removed."""
1881 1917 return self._filectxfn(self._repo, self, path)
1882 1918
1883 1919 def commit(self):
1884 1920 """commit context to the repo"""
1885 1921 return self._repo.commitctx(self)
1886 1922
1887 1923 @propertycache
1888 1924 def _manifest(self):
1889 1925 """generate a manifest based on the return values of filectxfn"""
1890 1926
1891 1927 # keep this simple for now; just worry about p1
1892 1928 pctx = self._parents[0]
1893 1929 man = pctx.manifest().copy()
1894 1930
1895 1931 for f in self._status.modified:
1896 1932 p1node = nullid
1897 1933 p2node = nullid
1898 1934 p = pctx[f].parents() # if file isn't in pctx, check p2?
1899 1935 if len(p) > 0:
1900 1936 p1node = p[0].filenode()
1901 1937 if len(p) > 1:
1902 1938 p2node = p[1].filenode()
1903 1939 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1904 1940
1905 1941 for f in self._status.added:
1906 1942 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1907 1943
1908 1944 for f in self._status.removed:
1909 1945 if f in man:
1910 1946 del man[f]
1911 1947
1912 1948 return man
1913 1949
1914 1950 @propertycache
1915 1951 def _status(self):
1916 1952 """Calculate exact status from ``files`` specified at construction
1917 1953 """
1918 1954 man1 = self.p1().manifest()
1919 1955 p2 = self._parents[1]
1920 1956 # "1 < len(self._parents)" can't be used to check for the
1921 1957 # existence of the 2nd parent, because "memctx._parents" is
1922 1958 # explicitly initialized as a two-element list.
1923 1959 if p2.node() != nullid:
1924 1960 man2 = p2.manifest()
1925 1961 managing = lambda f: f in man1 or f in man2
1926 1962 else:
1927 1963 managing = lambda f: f in man1
1928 1964
1929 1965 modified, added, removed = [], [], []
1930 1966 for f in self._files:
1931 1967 if not managing(f):
1932 1968 added.append(f)
1933 1969 elif self[f]:
1934 1970 modified.append(f)
1935 1971 else:
1936 1972 removed.append(f)
1937 1973
1938 1974 return scmutil.status(modified, added, removed, [], [], [], [])
1939 1975
1940 1976 class memfilectx(committablefilectx):
1941 1977 """memfilectx represents an in-memory file to commit.
1942 1978
1943 1979 See memctx and committablefilectx for more details.
1944 1980 """
1945 1981 def __init__(self, repo, path, data, islink=False,
1946 1982 isexec=False, copied=None, memctx=None):
1947 1983 """
1948 1984 path is the normalized file path relative to repository root.
1949 1985 data is the file content as a string.
1950 1986 islink is True if the file is a symbolic link.
1951 1987 isexec is True if the file is executable.
1952 1988 copied is the source file path if current file was copied in the
1953 1989 revision being committed, or None."""
1954 1990 super(memfilectx, self).__init__(repo, path, None, memctx)
1955 1991 self._data = data
1956 1992 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1957 1993 self._copied = None
1958 1994 if copied:
1959 1995 self._copied = (copied, nullid)
1960 1996
1961 1997 def data(self):
1962 1998 return self._data
1963 1999 def size(self):
1964 2000 return len(self.data())
1965 2001 def flags(self):
1966 2002 return self._flags
1967 2003 def renamed(self):
1968 2004 return self._copied
1969 2005
1970 2006 def remove(self, ignoremissing=False):
1971 2007 """wraps unlink for a repo's working directory"""
1972 2008 # need to figure out what to do here
1973 2009 del self._changectx[self._path]
1974 2010
1975 2011 def write(self, data, flags):
1976 2012 """wraps repo.wwrite"""
1977 2013 self._data = data
1978 2014
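# Illustrative values (hypothetical paths and content): a memfilectx
# describing an executable file recorded as a copy of another file in the
# revision being committed.
#
#   fctx = memfilectx(repo, 'scripts/run.sh', '#!/bin/sh\necho hi\n',
#                     isexec=True, copied='tools/run.sh', memctx=mctx)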
1979 2015 class metadataonlyctx(committablectx):
1980 2016 """Like memctx but it reuses the manifest of a different commit.
1981 2017 Intended to be used by lightweight operations that are creating
1982 2018 metadata-only changes.
1983 2019
1984 2020 Revision information is supplied at initialization time. 'repo' is the
1985 2021 current localrepo, 'ctx' is the original revision whose manifest we're
1986 2022 reusing, 'parents' is a sequence of two parent revision identifiers (pass
1987 2023 None for every missing parent), 'text' is the commit message.
1988 2024
1989 2025 user receives the committer name and defaults to current repository
1990 2026 username, date is the commit date in any format supported by
1991 2027 util.parsedate() and defaults to current date, extra is a dictionary of
1992 2028 metadata or is left empty.
1993 2029 """
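# Illustrative sketch (the new message is hypothetical): reword an existing
# changeset's commit message while reusing its manifest untouched.
#
#   old = repo['tip']
#   new = metadataonlyctx(repo, old, (old.p1().node(), old.p2().node()),
#                         'reworded message', user=old.user(),
#                         date=old.date(), extra=old.extra())
#   newnode = new.commit()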
1994 2030 def __new__(cls, repo, originalctx, *args, **kwargs):
1995 2031 return super(metadataonlyctx, cls).__new__(cls, repo)
1996 2032
1997 2033 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
1998 2034 extra=None, editor=False):
1999 2035 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2000 2036 self._rev = None
2001 2037 self._node = None
2002 2038 self._originalctx = originalctx
2003 2039 self._manifestnode = originalctx.manifestnode()
2004 2040 parents = [(p or nullid) for p in parents]
2005 2041 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2006 2042
2007 2043 # sanity check to ensure that the reused manifest parents are
2008 2044 # manifests of our commit parents
2009 2045 mp1, mp2 = self.manifestctx().parents
2010 2046 if p1 != nullid and p1.manifestctx().node() != mp1:
2011 2047 raise RuntimeError('can\'t reuse the manifest: '
2012 2048 'its p1 doesn\'t match the new ctx p1')
2013 2049 if p2 != nullid and p2.manifestctx().node() != mp2:
2014 2050 raise RuntimeError('can\'t reuse the manifest: '
2015 2051 'its p2 doesn\'t match the new ctx p2')
2016 2052
2017 2053 self._files = originalctx.files()
2018 2054 self.substate = {}
2019 2055
2020 2056 if extra:
2021 2057 self._extra = extra.copy()
2022 2058 else:
2023 2059 self._extra = {}
2024 2060
2025 2061 if self._extra.get('branch', '') == '':
2026 2062 self._extra['branch'] = 'default'
2027 2063
2028 2064 if editor:
2029 2065 self._text = editor(self._repo, self, [])
2030 2066 self._repo.savecommitmessage(self._text)
2031 2067
2032 2068 def manifestnode(self):
2033 2069 return self._manifestnode
2034 2070
2035 2071 @propertycache
2036 2072 def _manifestctx(self):
2037 2073 return self._repo.manifestlog[self._manifestnode]
2038 2074
2039 2075 def filectx(self, path, filelog=None):
2040 2076 return self._originalctx.filectx(path, filelog=filelog)
2041 2077
2042 2078 def commit(self):
2043 2079 """commit context to the repo"""
2044 2080 return self._repo.commitctx(self)
2045 2081
2046 2082 @property
2047 2083 def _manifest(self):
2048 2084 return self._originalctx.manifest()
2049 2085
2050 2086 @propertycache
2051 2087 def _status(self):
2052 2088 """Calculate exact status from ``files`` specified in the ``origctx``
2053 2089 and parents manifests.
2054 2090 """
2055 2091 man1 = self.p1().manifest()
2056 2092 p2 = self._parents[1]
2057 2093 # "1 < len(self._parents)" can't be used to check for the
2058 2094 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2059 2095 # explicitly initialized as a two-element list.
2060 2096 if p2.node() != nullid:
2061 2097 man2 = p2.manifest()
2062 2098 managing = lambda f: f in man1 or f in man2
2063 2099 else:
2064 2100 managing = lambda f: f in man1
2065 2101
2066 2102 modified, added, removed = [], [], []
2067 2103 for f in self._files:
2068 2104 if not managing(f):
2069 2105 added.append(f)
2070 2106 elif self[f]:
2071 2107 modified.append(f)
2072 2108 else:
2073 2109 removed.append(f)
2074 2110
2075 2111 return scmutil.status(modified, added, removed, [], [], [], [])