##// END OF EJS Templates
workingctx: also pass status tuple into poststatusfixup...
Siddharth Agarwal -
r32813:6d73b7ff default
parent child Browse files
Show More
@@ -1,2380 +1,2380
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 wdirrev,
27 27 )
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 mdiff,
34 34 obsolete as obsmod,
35 35 patch,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 revlog,
40 40 scmutil,
41 41 subrepo,
42 42 util,
43 43 )
44 44
45 45 propertycache = util.propertycache
46 46
47 47 nonascii = re.compile(r'[^\x21-\x7f]').search
48 48
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # passing an existing context through unchanged makes
        # "repo[ctx]"-style normalization a cheap no-op
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        r = short(self.node())
        if pycompat.ispy3:
            # str() must return a native (unicode) string on Python 3
            return r.decode('ascii')
        return r

    def __bytes__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # contexts of different concrete types never compare equal, even
        # when they happen to share a revision number
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            # deleted files are reported separately via the incoming
            # status; don't also classify them from the manifest diff
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # parsed subrepository state for this context
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        """return the revision recorded for subpath in this context's
        subrepo state"""
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        # synthesize a null second parent when there is only one
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """return (filenode, flags) for path, raising ManifestLookupError
        when the file is absent from this context"""
        if r'_manifest' in self.__dict__:
            # a full manifest is already cached: use it directly
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            # the delta against the parent manifest may be enough to
            # answer the query without reconstructing the full manifest
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # a missing file has no flags
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """return a matcher for the given patterns, rooted at the repo root"""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        #
        # (renamed from 'reversed' to avoid shadowing the builtin)
        reverse = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reverse = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reverse:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        # present results in a stable order
        for lst in r:
            lst.sort()

        return r
387 387
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    # the "visible" filters hide obsolete changesets; give those a more
    # actionable message than the generic filtered-revision one
    if repo.filtername.startswith('visible'):
        return error.FilteredRepoLookupError(
            _("hidden revision '%s'") % changeid,
            hint=_('use --hidden to access hidden revisions'))
    return error.FilteredRepoLookupError(
        _("filtered revision '%s' (not in '%s' subset)")
        % (changeid, repo.filtername))
400 400
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        # The lookup cascade below tries progressively more expensive
        # interpretations of changeid; the order of the branches and of
        # the except clauses is significant.
        try:
            if isinstance(changeid, int):
                # revision number
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # binary nodeid
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                # string form of a revision number, possibly negative
                # (counted back from the end of the changelog)
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # full hex nodeid
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # last resort: a unique prefix of a full hex nodeid
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # a binary node that failed lookup: render it as hex for
                # the error message raised below
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            # _rev may be unset on a partially-constructed context
            return id(self)

    def __nonzero__(self):
        # the null context is falsy
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # a null second parent is omitted from the list
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        """return the raw changeset fields as a tuple"""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    # plain accessors over the parsed changelog entry
    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        """generate a context for every ancestor revision"""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """generate a context for every descendant revision"""
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: use the revlog ancestor
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
661 661
662 662 class basefilectx(object):
663 663 """A filecontext object represents the common logic for its children:
664 664 filectx: read-only access to a filerevision that is already present
665 665 in the repo,
666 666 workingfilectx: a filecontext that represents files from the working
667 667 directory,
668 668 memfilectx: a filecontext that represents files in-memory,
669 669 overlayfilectx: duplicate another filecontext with some fields overridden.
670 670 """
    @propertycache
    def _filelog(self):
        # the filelog (revlog) that stores this file path's history
        return self._repo.file(self._path)
674 674
    @propertycache
    def _changeid(self):
        # changelog revision this file context is attached to, resolved
        # lazily: an explicit changeid or changectx wins; otherwise fall
        # back to the filelog linkrev, adjusted when a descendant is known
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)
687 687
    @propertycache
    def _filenode(self):
        # resolve an explicitly provided fileid if any, else ask the
        # associated changectx for the node recorded in its manifest
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)
694 694
    @propertycache
    def _filerev(self):
        # revision number of _filenode within the filelog
        return self._filelog.rev(self._filenode)
698 698
    @propertycache
    def _repopath(self):
        # repository-relative path (identical to _path for this base class)
        return self._path
702 702
    def __nonzero__(self):
        # truthiness == "the file exists in the associated changeset";
        # resolving the filenode raises LookupError when it does not
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__
712 712
    def __str__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # the associated changeset may be unresolvable; still render
            # something useful for debugging
            return "%s@???" % self.path()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))
721 721
    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # filenode may be unresolvable; fall back to object identity
            return id(self)
727 727
    def __eq__(self, other):
        # equal iff same concrete type, same path and same file node
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
737 737
    # simple accessors: revision metadata is delegated to the associated
    # changectx, file-level data to the filelog
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        # raw linkrev from the filelog; may be an alias — see introrev()
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())
783 783
    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            # unreadable data is reported as not binary
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()
796 796
    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    # set True by subclasses that implement their own comparison; cmp()
    # then defers to the other side
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        # Only read and compare contents when the sizes could plausibly
        # match; otherwise a size mismatch already proves a difference.
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True
822 822
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        # the unfiltered changelog is used so hidden revisions still
        # participate in the ancestry walk
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
868 868
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            # no attached changeset, or the linkrev already matches it:
            # the raw linkrev is authoritative
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)
884 884
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            # reuse any cached ancestry to avoid recomputing ancestors
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
900 900
    def parents(self):
        """return filectxs for this file revision's parents, substituting
        rename source information for a null parent when present"""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            #   be replaced with the rename information. This parent is -always-
            #   the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        # synthesize a null second parent when there is only one
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
929 929
930 930 def annotate(self, follow=False, linenumber=False, skiprevs=None,
931 931 diffopts=None):
932 932 '''returns a list of tuples of ((ctx, number), line) for each line
933 933 in the file, where ctx is the filectx of the node where
934 934 that line was last changed; if linenumber parameter is true, number is
935 935 the line number at the first appearance in the managed file, otherwise,
936 936 number has a fixed value of False.
937 937 '''
938 938
939 939 def lines(text):
940 940 if text.endswith("\n"):
941 941 return text.count("\n")
942 942 return text.count("\n") + int(bool(text))
943 943
944 944 if linenumber:
945 945 def decorate(text, rev):
946 946 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
947 947 else:
948 948 def decorate(text, rev):
949 949 return ([(rev, False)] * lines(text), text)
950 950
951 951 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
952 952
953 953 def parents(f):
954 954 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
955 955 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
956 956 # from the topmost introrev (= srcrev) down to p.linkrev() if it
957 957 # isn't an ancestor of the srcrev.
958 958 f._changeid
959 959 pl = f.parents()
960 960
961 961 # Don't return renamed parents if we aren't following.
962 962 if not follow:
963 963 pl = [p for p in pl if p.path() == f.path()]
964 964
965 965 # renamed filectx won't have a filelog yet, so set it
966 966 # from the cache to save time
967 967 for p in pl:
968 968 if not '_filelog' in p.__dict__:
969 969 p._filelog = getlog(p.path())
970 970
971 971 return pl
972 972
973 973 # use linkrev to find the first changeset where self appeared
974 974 base = self
975 975 introrev = self.introrev()
976 976 if self.rev() != introrev:
977 977 base = self.filectx(self.filenode(), changeid=introrev)
978 978 if getattr(base, '_ancestrycontext', None) is None:
979 979 cl = self._repo.changelog
980 980 if introrev is None:
981 981 # wctx is not inclusive, but works because _ancestrycontext
982 982 # is used to test filelog revisions
983 983 ac = cl.ancestors([p.rev() for p in base.parents()],
984 984 inclusive=True)
985 985 else:
986 986 ac = cl.ancestors([introrev], inclusive=True)
987 987 base._ancestrycontext = ac
988 988
989 989 # This algorithm would prefer to be recursive, but Python is a
990 990 # bit recursion-hostile. Instead we do an iterative
991 991 # depth-first search.
992 992
993 993 # 1st DFS pre-calculates pcache and needed
994 994 visit = [base]
995 995 pcache = {}
996 996 needed = {base: 1}
997 997 while visit:
998 998 f = visit.pop()
999 999 if f in pcache:
1000 1000 continue
1001 1001 pl = parents(f)
1002 1002 pcache[f] = pl
1003 1003 for p in pl:
1004 1004 needed[p] = needed.get(p, 0) + 1
1005 1005 if p not in pcache:
1006 1006 visit.append(p)
1007 1007
1008 1008 # 2nd DFS does the actual annotate
1009 1009 visit[:] = [base]
1010 1010 hist = {}
1011 1011 while visit:
1012 1012 f = visit[-1]
1013 1013 if f in hist:
1014 1014 visit.pop()
1015 1015 continue
1016 1016
1017 1017 ready = True
1018 1018 pl = pcache[f]
1019 1019 for p in pl:
1020 1020 if p not in hist:
1021 1021 ready = False
1022 1022 visit.append(p)
1023 1023 if ready:
1024 1024 visit.pop()
1025 1025 curr = decorate(f.data(), f)
1026 1026 skipchild = False
1027 1027 if skiprevs is not None:
1028 1028 skipchild = f._changeid in skiprevs
1029 1029 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1030 1030 diffopts)
1031 1031 for p in pl:
1032 1032 if needed[p] == 1:
1033 1033 del hist[p]
1034 1034 del needed[p]
1035 1035 else:
1036 1036 needed[p] -= 1
1037 1037
1038 1038 hist[f] = curr
1039 1039 del pcache[f]
1040 1040
1041 1041 return zip(hist[base][0], hist[base][1].splitlines(True))
1042 1042
1043 1043 def ancestors(self, followfirst=False):
1044 1044 visit = {}
1045 1045 c = self
1046 1046 if followfirst:
1047 1047 cut = 1
1048 1048 else:
1049 1049 cut = None
1050 1050
1051 1051 while True:
1052 1052 for parent in c.parents()[:cut]:
1053 1053 visit[(parent.linkrev(), parent.filenode())] = parent
1054 1054 if not visit:
1055 1055 break
1056 1056 c = visit.pop(max(visit))
1057 1057 yield c
1058 1058
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    >>> oldfctx = 'old'
    >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
    >>> olddata = 'a\nb\n'
    >>> p1data = 'a\nb\nc\n'
    >>> p2data = 'a\nc\nd\n'
    >>> childdata = 'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    # each entry pairs a parent's annotate data with the diff blocks between
    # that parent's text and the child's text
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        # This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        if child[0][bk][0] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = parent[0][ak]
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
    return child
1168 1168
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of pinning down the file revision must be given
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # unspecified attributes stay unset so the propertycaches in
        # basefilectx can derive them lazily
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the repository.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # revision text without flag processing applied
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # censored content is either replaced by an empty string or
            # aborts, depending on the censor.policy setting
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1274 1274
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    changed = any(btype == '!' for _block, btype in inrangeblocks)
    return changed, linerange1
1284 1284
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    # rebase fctx onto the changeset that actually introduced it so the
    # walk starts from the true introduction point
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        # process contexts newest-first
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
1319 1319
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    # map filelog revision -> (filectx, line range) for revisions already
    # walked; seeded with the starting context
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge), we assume that its
            # line range is the same independently of which parents was used
            # to compute it.
            assert i not in seen or seen[i][1] == linerange1, (
                'computed line range for %s is not consistent between '
                'ancestor branches' % c)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
1357 1357
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        # not committed yet: no revision number or node
        self._rev = None
        self._node = None
        self._text = text
        # date/user/status are only set eagerly when supplied; otherwise the
        # propertycaches below compute them lazily
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + r"+"

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # lazy default when no `changes` argument was supplied
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date allows tests to pin the commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context carries the bookmarks of all its parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1555 1555
1556 1556 class workingctx(committablectx):
1557 1557 """A workingctx object makes access to data related to
1558 1558 the current working directory convenient.
1559 1559 date - any valid date string or (unixtime, offset), or None.
1560 1560 user - username string, or None.
1561 1561 extra - a dictionary of extra values, or None.
1562 1562 changes - a list of file lists as returned by localrepo.status()
1563 1563 or None to use the repository status.
1564 1564 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # all commit-metadata handling lives in committablectx; workingctx
        # only layers dirstate-backed behavior on top of it
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1568 1568
1569 1569 def __iter__(self):
1570 1570 d = self._repo.dirstate
1571 1571 for f in d:
1572 1572 if d[f] != 'r':
1573 1573 yield f
1574 1574
1575 1575 def __contains__(self, key):
1576 1576 return self._repo.dirstate[key] not in "?r"
1577 1577
    def hex(self):
        # the working directory is identified by the magic wdirid node
        return hex(wdirid)
1580 1580
1581 1581 @propertycache
1582 1582 def _parents(self):
1583 1583 p = self._repo.dirstate.parents()
1584 1584 if p[1] == nullid:
1585 1585 p = p[:-1]
1586 1586 return [changectx(self._repo, x) for x in p]
1587 1587
1588 1588 def filectx(self, path, filelog=None):
1589 1589 """get a file context from the working directory"""
1590 1590 return workingfilectx(self._repo, path, workingctx=self,
1591 1591 filelog=filelog)
1592 1592
    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir; note: the return value is truthy, not
        # necessarily a bool (e.g. a changectx when a merge is in progress)
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1604 1604
    def add(self, list, prefix=""):
        """Schedule files for addition; return the list of rejected names.

        `list` shadows the builtin of the same name (historical API).
        """
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # refuse names that are not portable across platforms
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1636 1636
    def forget(self, files, prefix=""):
        """Stop tracking files; return the names that were not tracked."""
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    # already committed: mark as removed in the dirstate
                    self._repo.dirstate.remove(f)
                else:
                    # only added, never committed: simply drop the entry
                    self._repo.dirstate.drop(f)
            return rejected
1650 1650
    def undelete(self, list):
        """Restore removed files from a parent and mark them normal."""
        pctxs = self.parents()
        with self._repo.wlock():
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    # take the content from whichever parent has the file
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
1662 1662
    def copy(self, source, dest):
        """Record that `dest` was copied from `source` in the dirstate."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            with self._repo.wlock():
                # make sure the destination is tracked before recording
                # the copy source
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1681 1681
    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher for the working directory."""
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)
1693 1693
    def _filtersuspectsymlink(self, files):
        """Drop files whose symlink placeholder content looks bogus."""
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane
1712 1712
    def _checklookup(self, files):
        """Compare possibly-clean files against the first parent.

        Returns a (modified, deleted, fixup) triple of file lists, where
        fixup contains files that turned out to be clean.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1742 1742
1743 def _poststatusfixup(self, fixup):
1743 def _poststatusfixup(self, status, fixup):
1744 1744 """update dirstate for files that are actually clean"""
1745 1745 if fixup:
1746 1746 try:
1747 1747 oldid = self._repo.dirstate.identity()
1748 1748
1749 1749 # updating the dirstate is optional
1750 1750 # so we don't wait on the lock
1751 1751 # wlock can invalidate the dirstate, so cache normal _after_
1752 1752 # taking the lock
1753 1753 with self._repo.wlock(False):
1754 1754 if self._repo.dirstate.identity() == oldid:
1755 1755 normal = self._repo.dirstate.normal
1756 1756 for f in fixup:
1757 1757 normal(f)
1758 1758 # write changes out explicitly, because nesting
1759 1759 # wlock at runtime may prevent 'wlock.release()'
1760 1760 # after this block from doing so for subsequent
1761 1761 # changing files
1762 1762 tr = self._repo.currenttransaction()
1763 1763 self._repo.dirstate.write(tr)
1764 1764 else:
1765 1765 # in this case, writing changes out breaks
1766 1766 # consistency, because .hg/dirstate was
1767 1767 # already changed simultaneously after last
1768 1768 # caching (see also issue5584 for detail)
1769 1769 self._repo.ui.debug('skip updating dirstate: '
1770 1770 'identity mismatch\n')
1771 1771 except error.LockError:
1772 1772 pass
1773 1773
    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.

        Returns an scmutil.status tuple.  ``cmp`` from dirstate.status()
        lists files whose status could not be decided from stat data alone;
        those are resolved by content comparison in _checklookup(), and any
        that turn out clean are written back to the dirstate via
        _poststatusfixup().
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and listclean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1807 1807
1808 1808 @propertycache
1809 1809 def _manifest(self):
1810 1810 """generate a manifest corresponding to the values in self._status
1811 1811
1812 1812 This reuse the file nodeid from parent, but we use special node
1813 1813 identifiers for added and modified files. This is used by manifests
1814 1814 merge to see that files are different and by update logic to avoid
1815 1815 deleting newly added files.
1816 1816 """
1817 1817 return self._buildstatusmanifest(self._status)
1818 1818
1819 1819 def _buildstatusmanifest(self, status):
1820 1820 """Builds a manifest that includes the given status results."""
1821 1821 parents = self.parents()
1822 1822
1823 1823 man = parents[0].manifest().copy()
1824 1824
1825 1825 ff = self._flagfunc
1826 1826 for i, l in ((addednodeid, status.added),
1827 1827 (modifiednodeid, status.modified)):
1828 1828 for f in l:
1829 1829 man[f] = i
1830 1830 try:
1831 1831 man.setflag(f, ff(f))
1832 1832 except OSError:
1833 1833 pass
1834 1834
1835 1835 for f in status.deleted + status.removed:
1836 1836 if f in man:
1837 1837 del man[f]
1838 1838
1839 1839 return man
1840 1840
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # note: the incoming ``s`` is deliberately discarded; working-dir
        # status is always recomputed from the dirstate here
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # slow path: delegate to the generic manifest-vs-manifest status
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1860 1860
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            # install our warning-emitting callback on the (copied) matcher
            match.bad = bad
        return match
1882 1882
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        # ``filelog`` and ``ctx`` are optional; when omitted, the
        # corresponding attributes are expected to be filled in lazily
        # (e.g. via propertycache in subclasses)
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always represents an existing file
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nodeid of ``path`` in ``ctx``'s manifest, or nullid if absent
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # copy source becomes the first parent; its filelog is unknown
            # here, so pass None and let _parentfilectx resolve it
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents where the file does not exist (nullid)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # an uncommitted file has no children yet
        return []
1929 1929
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh workingctx when one wasn't supplied
        return workingctx(self._repo)

    def data(self):
        # file content as read from the working directory
        return self._repo.wread(self._path)
    def renamed(self):
        # (source path, source filenode) if the dirstate records a copy,
        # else None
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone: fall back to the changeset's own date
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1975 1975
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # intentionally skip workingctx.__init__ and go straight to its
        # base: ``changes`` already carries the precomputed status
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            # everything tracked but not part of this commit reads as clean
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2013 2013
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    results = {}

    def getfilectx(repo, memctx, path):
        try:
            return results[path]
        except KeyError:
            # first request for this path: compute and remember it
            value = func(repo, memctx, path)
            results[path] = value
            return value

    return getfilectx
2029 2029
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        renamed = fctx.renamed()
        copysource = renamed[0] if renamed else renamed
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copysource, memctx=memctx)

    return getfilectx
2048 2048
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        filedata, mode, copysource = patchstore.getfile(path)
        if filedata is None:
            # the patch removes this file
            return None
        link, exe = mode
        return memfilectx(repo, path, filedata, islink=link,
                          isexec=exe, copied=copysource,
                          memctx=memctx)

    return getfilectx
2064 2064
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # None parents are normalized to nullid
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no file-level parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: present => modified
                modified.append(f)
            else:
                # filectxfn returned None: the file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2187 2187
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # flags string: 'l' for symlink, 'x' for executable
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            # the source node is recalculated at commit time, so nullid is ok
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
2220 2220
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            # caller supplied a ctx: only "matches" if it equals the original
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr in attrs:
                if util.safehasattr(originalfctx, attr):
                    setattr(self, attr, getattr(originalfctx, attr))

    def data(self):
        # lazily evaluated; may be the original fctx.data or an override
        return self._datafunc()
2291 2291
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reuisng
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        # basectx.__new__ inspects the second positional argument, so pass
        # only repo through
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): p1/p2 are changectx objects compared against the
        # nullid bytes here -- confirm the equality semantics are intended
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file data is served by the original (manifest-providing) context
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
@@ -1,70 +1,70
1 1 # extension to emulate invoking 'dirstate.write()' at the time
2 2 # specified by '[fakedirstatewritetime] fakenow', only when
3 3 # 'dirstate.write()' is invoked via functions below:
4 4 #
5 5 # - 'workingctx._poststatusfixup()' (= 'repo.status()')
6 6 # - 'committablectx.markcommitted()'
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from mercurial import (
11 11 context,
12 12 dirstate,
13 13 extensions,
14 14 policy,
15 15 util,
16 16 )
17 17
18 18 parsers = policy.importmod(r'parsers')
19 19
def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
    # execute what original parsers.pack_dirstate should do actually
    # for consistency
    actualnow = int(now)
    for f, e in dmap.iteritems():
        if e[0] == 'n' and e[3] == actualnow:
            # entries whose mtime equals "now" are ambiguous; the real
            # pack_dirstate marks them with mtime -1, so emulate that before
            # substituting the fake timestamp below
            e = parsers.dirstatetuple(e[0], e[1], e[2], -1)
            dmap[f] = e

    # delegate to the original, but with the configured fake "now"
    return orig(dmap, copymap, pl, fakenow)
30 30
def fakewrite(ui, func):
    # fake "now" of 'pack_dirstate' only if it is invoked while 'func'

    fakenow = ui.config('fakedirstatewritetime', 'fakenow')
    if not fakenow:
        # Execute original one, if fakenow isn't configured. This is
        # useful to prevent subrepos from executing replaced one,
        # because replacing 'parsers.pack_dirstate' is also effective
        # in subrepos.
        return func()

    # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
    # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
    fakenow = util.parsedate(fakenow, ['%Y%m%d%H%M'])[0]

    # monkeypatch the two time sources used while writing the dirstate,
    # and restore them even if func() raises
    orig_pack_dirstate = parsers.pack_dirstate
    orig_dirstate_getfsnow = dirstate._getfsnow
    wrapper = lambda *args: pack_dirstate(fakenow, orig_pack_dirstate, *args)

    parsers.pack_dirstate = wrapper
    dirstate._getfsnow = lambda *args: fakenow
    try:
        return func()
    finally:
        parsers.pack_dirstate = orig_pack_dirstate
        dirstate._getfsnow = orig_dirstate_getfsnow
57 57
def _poststatusfixup(orig, workingctx, status, fixup):
    # route the wrapped dirstate write through fakewrite so it uses fakenow
    ui = workingctx.repo().ui
    def invoke():
        return orig(workingctx, status, fixup)
    return fakewrite(ui, invoke)
61 61
def markcommitted(orig, committablectx, node):
    # route the wrapped dirstate write through fakewrite so it uses fakenow
    ui = committablectx.repo().ui
    def invoke():
        return orig(committablectx, node)
    return fakewrite(ui, invoke)
65 65
def extsetup(ui):
    # install the fake-time wrappers around both dirstate-writing entry points
    wrap = extensions.wrapfunction
    wrap(context.workingctx, '_poststatusfixup', _poststatusfixup)
    wrap(context.committablectx, 'markcommitted', markcommitted)
General Comments 0
You need to be logged in to leave comments. Login now