py3: make sure we return strings from __str__ and __repr__...
Pulkit Goyal
r32613:e7eb7494 default
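The point of the change: on Python 3, __str__ and __repr__ must return native str objects, while Mercurial keeps node ids and most internal strings as bytes, and (at the time of this change) its Python 3 source loader rewrites unprefixed string literals into bytes literals; an r"" prefix keeps a literal as a native str. Below is a minimal, hypothetical sketch of the pattern the diff applies — the class and names are illustrative only, not Mercurial's own:

    class ctx(object):
        def __init__(self, node):
            self._node = node                # node id kept as bytes, e.g. b'e7eb7494'

        def __bytes__(self):
            return self._node                # the byte-string form stays bytes

        def __str__(self):
            r = self._node
            # Python 3 requires str() to yield a native str, so decode bytes
            if not isinstance(r, str):
                return r.decode('ascii')
            return r

        def __repr__(self):
            # the r"" prefix marks a literal that should stay a native str
            # under Mercurial's Python 3 loader; returning bytes here would
            # make repr() raise TypeError on Python 3
            return r"<%s %s>" % (type(self).__name__, str(self))

    print(str(ctx(b'e7eb7494')), repr(ctx(b'e7eb7494')))   # e7eb7494 <ctx e7eb7494>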
@@ -1,2341 +1,2341 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 )
27 27 from . import (
28 28 encoding,
29 29 error,
30 30 fileset,
31 31 match as matchmod,
32 32 mdiff,
33 33 obsolete as obsmod,
34 34 patch,
35 35 phases,
36 36 pycompat,
37 37 repoview,
38 38 revlog,
39 39 scmutil,
40 40 subrepo,
41 41 util,
42 42 )
43 43
44 44 propertycache = util.propertycache
45 45
46 46 nonascii = re.compile(r'[^\x21-\x7f]').search
47 47
48 48 class basectx(object):
49 49 """A basectx object represents the common logic for its children:
50 50 changectx: read-only context that is already present in the repo,
51 51 workingctx: a context that represents the working directory and can
52 52 be committed,
53 53 memctx: a context that represents changes in-memory and can also
54 54 be committed."""
55 55 def __new__(cls, repo, changeid='', *args, **kwargs):
56 56 if isinstance(changeid, basectx):
57 57 return changeid
58 58
59 59 o = super(basectx, cls).__new__(cls)
60 60
61 61 o._repo = repo
62 62 o._rev = nullrev
63 63 o._node = nullid
64 64
65 65 return o
66 66
67 67 def __str__(self):
68 68 r = short(self.node())
69 69 if pycompat.ispy3:
70 70 return r.decode('ascii')
71 71 return r
72 72
73 73 def __bytes__(self):
74 74 return short(self.node())
75 75
76 76 def __int__(self):
77 77 return self.rev()
78 78
79 79 def __repr__(self):
80 return "<%s %s>" % (type(self).__name__, str(self))
80 return r"<%s %s>" % (type(self).__name__, str(self))
81 81
82 82 def __eq__(self, other):
83 83 try:
84 84 return type(self) == type(other) and self._rev == other._rev
85 85 except AttributeError:
86 86 return False
87 87
88 88 def __ne__(self, other):
89 89 return not (self == other)
90 90
91 91 def __contains__(self, key):
92 92 return key in self._manifest
93 93
94 94 def __getitem__(self, key):
95 95 return self.filectx(key)
96 96
97 97 def __iter__(self):
98 98 return iter(self._manifest)
99 99
100 100 def _buildstatusmanifest(self, status):
101 101 """Builds a manifest that includes the given status results, if this is
102 102 a working copy context. For non-working copy contexts, it just returns
103 103 the normal manifest."""
104 104 return self.manifest()
105 105
106 106 def _matchstatus(self, other, match):
107 107 """return match.always if match is None
108 108
109 109 This internal method provides a way for child objects to override the
110 110 match operator.
111 111 """
112 112 return match or matchmod.always(self._repo.root, self._repo.getcwd())
113 113
114 114 def _buildstatus(self, other, s, match, listignored, listclean,
115 115 listunknown):
116 116 """build a status with respect to another context"""
117 117 # Load earliest manifest first for caching reasons. More specifically,
118 118 # if you have revisions 1000 and 1001, 1001 is probably stored as a
119 119 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
120 120 # 1000 and cache it so that when you read 1001, we just need to apply a
121 121 # delta to what's in the cache. So that's one full reconstruction + one
122 122 # delta application.
123 123 mf2 = None
124 124 if self.rev() is not None and self.rev() < other.rev():
125 125 mf2 = self._buildstatusmanifest(s)
126 126 mf1 = other._buildstatusmanifest(s)
127 127 if mf2 is None:
128 128 mf2 = self._buildstatusmanifest(s)
129 129
130 130 modified, added = [], []
131 131 removed = []
132 132 clean = []
133 133 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
134 134 deletedset = set(deleted)
135 135 d = mf1.diff(mf2, match=match, clean=listclean)
136 136 for fn, value in d.iteritems():
137 137 if fn in deletedset:
138 138 continue
139 139 if value is None:
140 140 clean.append(fn)
141 141 continue
142 142 (node1, flag1), (node2, flag2) = value
143 143 if node1 is None:
144 144 added.append(fn)
145 145 elif node2 is None:
146 146 removed.append(fn)
147 147 elif flag1 != flag2:
148 148 modified.append(fn)
149 149 elif node2 not in wdirnodes:
150 150 # When comparing files between two commits, we save time by
151 151 # not comparing the file contents when the nodeids differ.
152 152 # Note that this means we incorrectly report a reverted change
153 153 # to a file as a modification.
154 154 modified.append(fn)
155 155 elif self[fn].cmp(other[fn]):
156 156 modified.append(fn)
157 157 else:
158 158 clean.append(fn)
159 159
160 160 if removed:
161 161 # need to filter files if they are already reported as removed
162 162 unknown = [fn for fn in unknown if fn not in mf1 and
163 163 (not match or match(fn))]
164 164 ignored = [fn for fn in ignored if fn not in mf1 and
165 165 (not match or match(fn))]
166 166 # if they're deleted, don't report them as removed
167 167 removed = [fn for fn in removed if fn not in deletedset]
168 168
169 169 return scmutil.status(modified, added, removed, deleted, unknown,
170 170 ignored, clean)
171 171
172 172 @propertycache
173 173 def substate(self):
174 174 return subrepo.state(self, self._repo.ui)
175 175
176 176 def subrev(self, subpath):
177 177 return self.substate[subpath][1]
178 178
179 179 def rev(self):
180 180 return self._rev
181 181 def node(self):
182 182 return self._node
183 183 def hex(self):
184 184 return hex(self.node())
185 185 def manifest(self):
186 186 return self._manifest
187 187 def manifestctx(self):
188 188 return self._manifestctx
189 189 def repo(self):
190 190 return self._repo
191 191 def phasestr(self):
192 192 return phases.phasenames[self.phase()]
193 193 def mutable(self):
194 194 return self.phase() > phases.public
195 195
196 196 def getfileset(self, expr):
197 197 return fileset.getfileset(self, expr)
198 198
199 199 def obsolete(self):
200 200 """True if the changeset is obsolete"""
201 201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
202 202
203 203 def extinct(self):
204 204 """True if the changeset is extinct"""
205 205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
206 206
207 207 def unstable(self):
208 208 """True if the changeset is not obsolete but its ancestors are"""
209 209 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
210 210
211 211 def bumped(self):
212 212 """True if the changeset tries to be a successor of a public changeset
213 213
214 214 Only non-public and non-obsolete changesets may be bumped.
215 215 """
216 216 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
217 217
218 218 def divergent(self):
219 219 """Is a successor of a changeset with multiple possible successors sets
220 220
221 221 Only non-public and non-obsolete changesets may be divergent.
222 222 """
223 223 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
224 224
225 225 def troubled(self):
226 226 """True if the changeset is either unstable, bumped or divergent"""
227 227 return self.unstable() or self.bumped() or self.divergent()
228 228
229 229 def troubles(self):
230 230 """return the list of troubles affecting this changeset.
231 231
232 232 Troubles are returned as strings. Possible values are:
233 233 - unstable,
234 234 - bumped,
235 235 - divergent.
236 236 """
237 237 troubles = []
238 238 if self.unstable():
239 239 troubles.append('unstable')
240 240 if self.bumped():
241 241 troubles.append('bumped')
242 242 if self.divergent():
243 243 troubles.append('divergent')
244 244 return troubles
245 245
246 246 def parents(self):
247 247 """return contexts for each parent changeset"""
248 248 return self._parents
249 249
250 250 def p1(self):
251 251 return self._parents[0]
252 252
253 253 def p2(self):
254 254 parents = self._parents
255 255 if len(parents) == 2:
256 256 return parents[1]
257 257 return changectx(self._repo, nullrev)
258 258
259 259 def _fileinfo(self, path):
260 260 if r'_manifest' in self.__dict__:
261 261 try:
262 262 return self._manifest[path], self._manifest.flags(path)
263 263 except KeyError:
264 264 raise error.ManifestLookupError(self._node, path,
265 265 _('not found in manifest'))
266 266 if r'_manifestdelta' in self.__dict__ or path in self.files():
267 267 if path in self._manifestdelta:
268 268 return (self._manifestdelta[path],
269 269 self._manifestdelta.flags(path))
270 270 mfl = self._repo.manifestlog
271 271 try:
272 272 node, flag = mfl[self._changeset.manifest].find(path)
273 273 except KeyError:
274 274 raise error.ManifestLookupError(self._node, path,
275 275 _('not found in manifest'))
276 276
277 277 return node, flag
278 278
279 279 def filenode(self, path):
280 280 return self._fileinfo(path)[0]
281 281
282 282 def flags(self, path):
283 283 try:
284 284 return self._fileinfo(path)[1]
285 285 except error.LookupError:
286 286 return ''
287 287
288 288 def sub(self, path, allowcreate=True):
289 289 '''return a subrepo for the stored revision of path, never wdir()'''
290 290 return subrepo.subrepo(self, path, allowcreate=allowcreate)
291 291
292 292 def nullsub(self, path, pctx):
293 293 return subrepo.nullsubrepo(self, path, pctx)
294 294
295 295 def workingsub(self, path):
296 296 '''return a subrepo for the stored revision, or wdir if this is a wdir
297 297 context.
298 298 '''
299 299 return subrepo.subrepo(self, path, allowwdir=True)
300 300
301 301 def match(self, pats=None, include=None, exclude=None, default='glob',
302 302 listsubrepos=False, badfn=None):
303 303 if pats is None:
304 304 pats = []
305 305 r = self._repo
306 306 return matchmod.match(r.root, r.getcwd(), pats,
307 307 include, exclude, default,
308 308 auditor=r.nofsauditor, ctx=self,
309 309 listsubrepos=listsubrepos, badfn=badfn)
310 310
311 311 def diff(self, ctx2=None, match=None, **opts):
312 312 """Returns a diff generator for the given contexts and matcher"""
313 313 if ctx2 is None:
314 314 ctx2 = self.p1()
315 315 if ctx2 is not None:
316 316 ctx2 = self._repo[ctx2]
317 317 diffopts = patch.diffopts(self._repo.ui, opts)
318 318 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
319 319
320 320 def dirs(self):
321 321 return self._manifest.dirs()
322 322
323 323 def hasdir(self, dir):
324 324 return self._manifest.hasdir(dir)
325 325
326 326 def status(self, other=None, match=None, listignored=False,
327 327 listclean=False, listunknown=False, listsubrepos=False):
328 328 """return status of files between two nodes or node and working
329 329 directory.
330 330
331 331 If other is None, compare this node with working directory.
332 332
333 333 returns (modified, added, removed, deleted, unknown, ignored, clean)
334 334 """
335 335
336 336 ctx1 = self
337 337 ctx2 = self._repo[other]
338 338
339 339 # This next code block is, admittedly, fragile logic that tests for
340 340 # reversing the contexts and wouldn't need to exist if it weren't for
341 341 # the fast (and common) code path of comparing the working directory
342 342 # with its first parent.
343 343 #
344 344 # What we're aiming for here is the ability to call:
345 345 #
346 346 # workingctx.status(parentctx)
347 347 #
348 348 # If we always built the manifest for each context and compared those,
349 349 # then we'd be done. But the special case of the above call means we
350 350 # just copy the manifest of the parent.
351 351 reversed = False
352 352 if (not isinstance(ctx1, changectx)
353 353 and isinstance(ctx2, changectx)):
354 354 reversed = True
355 355 ctx1, ctx2 = ctx2, ctx1
356 356
357 357 match = ctx2._matchstatus(ctx1, match)
358 358 r = scmutil.status([], [], [], [], [], [], [])
359 359 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
360 360 listunknown)
361 361
362 362 if reversed:
363 363 # Reverse added and removed. Clear deleted, unknown and ignored as
364 364 # these make no sense to reverse.
365 365 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
366 366 r.clean)
367 367
368 368 if listsubrepos:
369 369 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
370 370 try:
371 371 rev2 = ctx2.subrev(subpath)
372 372 except KeyError:
373 373 # A subrepo that existed in node1 was deleted between
374 374 # node1 and node2 (inclusive). Thus, ctx2's substate
375 375 # won't contain that subpath. The best we can do is ignore it.
376 376 rev2 = None
377 377 submatch = matchmod.subdirmatcher(subpath, match)
378 378 s = sub.status(rev2, match=submatch, ignored=listignored,
379 379 clean=listclean, unknown=listunknown,
380 380 listsubrepos=True)
381 381 for rfiles, sfiles in zip(r, s):
382 382 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
383 383
384 384 for l in r:
385 385 l.sort()
386 386
387 387 return r
388 388
389 389
390 390 def makememctx(repo, parents, text, user, date, branch, files, store,
391 391 editor=None, extra=None):
392 392 def getfilectx(repo, memctx, path):
393 393 data, mode, copied = store.getfile(path)
394 394 if data is None:
395 395 return None
396 396 islink, isexec = mode
397 397 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
398 398 copied=copied, memctx=memctx)
399 399 if extra is None:
400 400 extra = {}
401 401 if branch:
402 402 extra['branch'] = encoding.fromlocal(branch)
403 403 ctx = memctx(repo, parents, text, files, getfilectx, user,
404 404 date, extra, editor)
405 405 return ctx
406 406
407 407 def _filterederror(repo, changeid):
408 408 """build an exception to be raised about a filtered changeid
409 409
410 410 This is extracted into a function to help extensions (e.g. evolve)
411 411 experiment with various message variants."""
412 412 if repo.filtername.startswith('visible'):
413 413 msg = _("hidden revision '%s'") % changeid
414 414 hint = _('use --hidden to access hidden revisions')
415 415 return error.FilteredRepoLookupError(msg, hint=hint)
416 416 msg = _("filtered revision '%s' (not in '%s' subset)")
417 417 msg %= (changeid, repo.filtername)
418 418 return error.FilteredRepoLookupError(msg)
419 419
420 420 class changectx(basectx):
421 421 """A changecontext object makes access to data related to a particular
422 422 changeset convenient. It represents a read-only context already present in
423 423 the repo."""
424 424 def __init__(self, repo, changeid=''):
425 425 """changeid is a revision number, node, or tag"""
426 426
427 427 # since basectx.__new__ already took care of copying the object, we
428 428 # don't need to do anything in __init__, so we just exit here
429 429 if isinstance(changeid, basectx):
430 430 return
431 431
432 432 if changeid == '':
433 433 changeid = '.'
434 434 self._repo = repo
435 435
436 436 try:
437 437 if isinstance(changeid, int):
438 438 self._node = repo.changelog.node(changeid)
439 439 self._rev = changeid
440 440 return
441 441 if not pycompat.ispy3 and isinstance(changeid, long):
442 442 changeid = str(changeid)
443 443 if changeid == 'null':
444 444 self._node = nullid
445 445 self._rev = nullrev
446 446 return
447 447 if changeid == 'tip':
448 448 self._node = repo.changelog.tip()
449 449 self._rev = repo.changelog.rev(self._node)
450 450 return
451 451 if changeid == '.' or changeid == repo.dirstate.p1():
452 452 # this is a hack to delay/avoid loading obsmarkers
453 453 # when we know that '.' won't be hidden
454 454 self._node = repo.dirstate.p1()
455 455 self._rev = repo.unfiltered().changelog.rev(self._node)
456 456 return
457 457 if len(changeid) == 20:
458 458 try:
459 459 self._node = changeid
460 460 self._rev = repo.changelog.rev(changeid)
461 461 return
462 462 except error.FilteredRepoLookupError:
463 463 raise
464 464 except LookupError:
465 465 pass
466 466
467 467 try:
468 468 r = int(changeid)
469 469 if '%d' % r != changeid:
470 470 raise ValueError
471 471 l = len(repo.changelog)
472 472 if r < 0:
473 473 r += l
474 474 if r < 0 or r >= l:
475 475 raise ValueError
476 476 self._rev = r
477 477 self._node = repo.changelog.node(r)
478 478 return
479 479 except error.FilteredIndexError:
480 480 raise
481 481 except (ValueError, OverflowError, IndexError):
482 482 pass
483 483
484 484 if len(changeid) == 40:
485 485 try:
486 486 self._node = bin(changeid)
487 487 self._rev = repo.changelog.rev(self._node)
488 488 return
489 489 except error.FilteredLookupError:
490 490 raise
491 491 except (TypeError, LookupError):
492 492 pass
493 493
494 494 # lookup bookmarks through the name interface
495 495 try:
496 496 self._node = repo.names.singlenode(repo, changeid)
497 497 self._rev = repo.changelog.rev(self._node)
498 498 return
499 499 except KeyError:
500 500 pass
501 501 except error.FilteredRepoLookupError:
502 502 raise
503 503 except error.RepoLookupError:
504 504 pass
505 505
506 506 self._node = repo.unfiltered().changelog._partialmatch(changeid)
507 507 if self._node is not None:
508 508 self._rev = repo.changelog.rev(self._node)
509 509 return
510 510
511 511 # lookup failed
512 512 # check if it might have come from damaged dirstate
513 513 #
514 514 # XXX we could avoid the unfiltered if we had a recognizable
515 515 # exception for filtered changeset access
516 516 if changeid in repo.unfiltered().dirstate.parents():
517 517 msg = _("working directory has unknown parent '%s'!")
518 518 raise error.Abort(msg % short(changeid))
519 519 try:
520 520 if len(changeid) == 20 and nonascii(changeid):
521 521 changeid = hex(changeid)
522 522 except TypeError:
523 523 pass
524 524 except (error.FilteredIndexError, error.FilteredLookupError,
525 525 error.FilteredRepoLookupError):
526 526 raise _filterederror(repo, changeid)
527 527 except IndexError:
528 528 pass
529 529 raise error.RepoLookupError(
530 530 _("unknown revision '%s'") % changeid)
531 531
532 532 def __hash__(self):
533 533 try:
534 534 return hash(self._rev)
535 535 except AttributeError:
536 536 return id(self)
537 537
538 538 def __nonzero__(self):
539 539 return self._rev != nullrev
540 540
541 541 __bool__ = __nonzero__
542 542
543 543 @propertycache
544 544 def _changeset(self):
545 545 return self._repo.changelog.changelogrevision(self.rev())
546 546
547 547 @propertycache
548 548 def _manifest(self):
549 549 return self._manifestctx.read()
550 550
551 551 @property
552 552 def _manifestctx(self):
553 553 return self._repo.manifestlog[self._changeset.manifest]
554 554
555 555 @propertycache
556 556 def _manifestdelta(self):
557 557 return self._manifestctx.readdelta()
558 558
559 559 @propertycache
560 560 def _parents(self):
561 561 repo = self._repo
562 562 p1, p2 = repo.changelog.parentrevs(self._rev)
563 563 if p2 == nullrev:
564 564 return [changectx(repo, p1)]
565 565 return [changectx(repo, p1), changectx(repo, p2)]
566 566
567 567 def changeset(self):
568 568 c = self._changeset
569 569 return (
570 570 c.manifest,
571 571 c.user,
572 572 c.date,
573 573 c.files,
574 574 c.description,
575 575 c.extra,
576 576 )
577 577 def manifestnode(self):
578 578 return self._changeset.manifest
579 579
580 580 def user(self):
581 581 return self._changeset.user
582 582 def date(self):
583 583 return self._changeset.date
584 584 def files(self):
585 585 return self._changeset.files
586 586 def description(self):
587 587 return self._changeset.description
588 588 def branch(self):
589 589 return encoding.tolocal(self._changeset.extra.get("branch"))
590 590 def closesbranch(self):
591 591 return 'close' in self._changeset.extra
592 592 def extra(self):
593 593 return self._changeset.extra
594 594 def tags(self):
595 595 return self._repo.nodetags(self._node)
596 596 def bookmarks(self):
597 597 return self._repo.nodebookmarks(self._node)
598 598 def phase(self):
599 599 return self._repo._phasecache.phase(self._repo, self._rev)
600 600 def hidden(self):
601 601 return self._rev in repoview.filterrevs(self._repo, 'visible')
602 602
603 603 def children(self):
604 604 """return contexts for each child changeset"""
605 605 c = self._repo.changelog.children(self._node)
606 606 return [changectx(self._repo, x) for x in c]
607 607
608 608 def ancestors(self):
609 609 for a in self._repo.changelog.ancestors([self._rev]):
610 610 yield changectx(self._repo, a)
611 611
612 612 def descendants(self):
613 613 for d in self._repo.changelog.descendants([self._rev]):
614 614 yield changectx(self._repo, d)
615 615
616 616 def filectx(self, path, fileid=None, filelog=None):
617 617 """get a file context from this changeset"""
618 618 if fileid is None:
619 619 fileid = self.filenode(path)
620 620 return filectx(self._repo, path, fileid=fileid,
621 621 changectx=self, filelog=filelog)
622 622
623 623 def ancestor(self, c2, warn=False):
624 624 """return the "best" ancestor context of self and c2
625 625
626 626 If there are multiple candidates, it will show a message and check
627 627 merge.preferancestor configuration before falling back to the
628 628 revlog ancestor."""
629 629 # deal with workingctxs
630 630 n2 = c2._node
631 631 if n2 is None:
632 632 n2 = c2._parents[0]._node
633 633 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
634 634 if not cahs:
635 635 anc = nullid
636 636 elif len(cahs) == 1:
637 637 anc = cahs[0]
638 638 else:
639 639 # experimental config: merge.preferancestor
640 640 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
641 641 try:
642 642 ctx = changectx(self._repo, r)
643 643 except error.RepoLookupError:
644 644 continue
645 645 anc = ctx.node()
646 646 if anc in cahs:
647 647 break
648 648 else:
649 649 anc = self._repo.changelog.ancestor(self._node, n2)
650 650 if warn:
651 651 self._repo.ui.status(
652 652 (_("note: using %s as ancestor of %s and %s\n") %
653 653 (short(anc), short(self._node), short(n2))) +
654 654 ''.join(_(" alternatively, use --config "
655 655 "merge.preferancestor=%s\n") %
656 656 short(n) for n in sorted(cahs) if n != anc))
657 657 return changectx(self._repo, anc)
658 658
659 659 def descendant(self, other):
660 660 """True if other is descendant of this changeset"""
661 661 return self._repo.changelog.descendant(self._rev, other._rev)
662 662
663 663 def walk(self, match):
664 664 '''Generates matching file names.'''
665 665
666 666 # Wrap match.bad method to have message with nodeid
667 667 def bad(fn, msg):
668 668 # The manifest doesn't know about subrepos, so don't complain about
669 669 # paths into valid subrepos.
670 670 if any(fn == s or fn.startswith(s + '/')
671 671 for s in self.substate):
672 672 return
673 673 match.bad(fn, _('no such file in rev %s') % self)
674 674
675 675 m = matchmod.badmatch(match, bad)
676 676 return self._manifest.walk(m)
677 677
678 678 def matches(self, match):
679 679 return self.walk(match)
680 680
681 681 class basefilectx(object):
682 682 """A filecontext object represents the common logic for its children:
683 683 filectx: read-only access to a filerevision that is already present
684 684 in the repo,
685 685 workingfilectx: a filecontext that represents files from the working
686 686 directory,
687 687 memfilectx: a filecontext that represents files in-memory,
688 688 overlayfilectx: duplicate another filecontext with some fields overridden.
689 689 """
690 690 @propertycache
691 691 def _filelog(self):
692 692 return self._repo.file(self._path)
693 693
694 694 @propertycache
695 695 def _changeid(self):
696 696 if r'_changeid' in self.__dict__:
697 697 return self._changeid
698 698 elif r'_changectx' in self.__dict__:
699 699 return self._changectx.rev()
700 700 elif r'_descendantrev' in self.__dict__:
701 701 # this file context was created from a revision with a known
702 702 # descendant, we can (lazily) correct for linkrev aliases
703 703 return self._adjustlinkrev(self._descendantrev)
704 704 else:
705 705 return self._filelog.linkrev(self._filerev)
706 706
707 707 @propertycache
708 708 def _filenode(self):
709 709 if r'_fileid' in self.__dict__:
710 710 return self._filelog.lookup(self._fileid)
711 711 else:
712 712 return self._changectx.filenode(self._path)
713 713
714 714 @propertycache
715 715 def _filerev(self):
716 716 return self._filelog.rev(self._filenode)
717 717
718 718 @propertycache
719 719 def _repopath(self):
720 720 return self._path
721 721
722 722 def __nonzero__(self):
723 723 try:
724 724 self._filenode
725 725 return True
726 726 except error.LookupError:
727 727 # file is missing
728 728 return False
729 729
730 730 __bool__ = __nonzero__
731 731
732 732 def __str__(self):
733 733 try:
734 734 return "%s@%s" % (self.path(), self._changectx)
735 735 except error.LookupError:
736 736 return "%s@???" % self.path()
737 737
738 738 def __repr__(self):
739 739 return "<%s %s>" % (type(self).__name__, str(self))
740 740
741 741 def __hash__(self):
742 742 try:
743 743 return hash((self._path, self._filenode))
744 744 except AttributeError:
745 745 return id(self)
746 746
747 747 def __eq__(self, other):
748 748 try:
749 749 return (type(self) == type(other) and self._path == other._path
750 750 and self._filenode == other._filenode)
751 751 except AttributeError:
752 752 return False
753 753
754 754 def __ne__(self, other):
755 755 return not (self == other)
756 756
757 757 def filerev(self):
758 758 return self._filerev
759 759 def filenode(self):
760 760 return self._filenode
761 761 @propertycache
762 762 def _flags(self):
763 763 return self._changectx.flags(self._path)
764 764 def flags(self):
765 765 return self._flags
766 766 def filelog(self):
767 767 return self._filelog
768 768 def rev(self):
769 769 return self._changeid
770 770 def linkrev(self):
771 771 return self._filelog.linkrev(self._filerev)
772 772 def node(self):
773 773 return self._changectx.node()
774 774 def hex(self):
775 775 return self._changectx.hex()
776 776 def user(self):
777 777 return self._changectx.user()
778 778 def date(self):
779 779 return self._changectx.date()
780 780 def files(self):
781 781 return self._changectx.files()
782 782 def description(self):
783 783 return self._changectx.description()
784 784 def branch(self):
785 785 return self._changectx.branch()
786 786 def extra(self):
787 787 return self._changectx.extra()
788 788 def phase(self):
789 789 return self._changectx.phase()
790 790 def phasestr(self):
791 791 return self._changectx.phasestr()
792 792 def manifest(self):
793 793 return self._changectx.manifest()
794 794 def changectx(self):
795 795 return self._changectx
796 796 def renamed(self):
797 797 return self._copied
798 798 def repo(self):
799 799 return self._repo
800 800 def size(self):
801 801 return len(self.data())
802 802
803 803 def path(self):
804 804 return self._path
805 805
806 806 def isbinary(self):
807 807 try:
808 808 return util.binary(self.data())
809 809 except IOError:
810 810 return False
811 811 def isexec(self):
812 812 return 'x' in self.flags()
813 813 def islink(self):
814 814 return 'l' in self.flags()
815 815
816 816 def isabsent(self):
817 817 """whether this filectx represents a file not in self._changectx
818 818
819 819 This is mainly for merge code to detect change/delete conflicts. This is
820 820 expected to be True for all subclasses of basectx."""
821 821 return False
822 822
823 823 _customcmp = False
824 824 def cmp(self, fctx):
825 825 """compare with other file context
826 826
827 827 returns True if different from fctx.
828 828 """
829 829 if fctx._customcmp:
830 830 return fctx.cmp(self)
831 831
832 832 if (fctx._filenode is None
833 833 and (self._repo._encodefilterpats
834 834 # if file data starts with '\1\n', empty metadata block is
835 835 # prepended, which adds 4 bytes to filelog.size().
836 836 or self.size() - 4 == fctx.size())
837 837 or self.size() == fctx.size()):
838 838 return self._filelog.cmp(self._filenode, fctx.data())
839 839
840 840 return True
841 841
842 842 def _adjustlinkrev(self, srcrev, inclusive=False):
843 843 """return the first ancestor of <srcrev> introducing <fnode>
844 844
845 845 If the linkrev of the file revision does not point to an ancestor of
846 846 srcrev, we'll walk down the ancestors until we find one introducing
847 847 this file revision.
848 848
849 849 :srcrev: the changeset revision we search ancestors from
850 850 :inclusive: if true, the src revision will also be checked
851 851 """
852 852 repo = self._repo
853 853 cl = repo.unfiltered().changelog
854 854 mfl = repo.manifestlog
855 855 # fetch the linkrev
856 856 lkr = self.linkrev()
857 857 # hack to reuse ancestor computation when searching for renames
858 858 memberanc = getattr(self, '_ancestrycontext', None)
859 859 iteranc = None
860 860 if srcrev is None:
861 861 # wctx case, used by workingfilectx during mergecopy
862 862 revs = [p.rev() for p in self._repo[None].parents()]
863 863 inclusive = True # we skipped the real (revless) source
864 864 else:
865 865 revs = [srcrev]
866 866 if memberanc is None:
867 867 memberanc = iteranc = cl.ancestors(revs, lkr,
868 868 inclusive=inclusive)
869 869 # check if this linkrev is an ancestor of srcrev
870 870 if lkr not in memberanc:
871 871 if iteranc is None:
872 872 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
873 873 fnode = self._filenode
874 874 path = self._path
875 875 for a in iteranc:
876 876 ac = cl.read(a) # get changeset data (we avoid object creation)
877 877 if path in ac[3]: # checking the 'files' field.
878 878 # The file has been touched, check if the content is
879 879 # similar to the one we search for.
880 880 if fnode == mfl[ac[0]].readfast().get(path):
881 881 return a
882 882 # In theory, we should never get out of that loop without a result.
883 883 # But if the manifest uses a buggy file revision (not a child of the
884 884 # one it replaces) we could. Such a buggy situation will likely
885 885 # result in a crash somewhere else at some point.
886 886 return lkr
887 887
888 888 def introrev(self):
889 889 """return the rev of the changeset which introduced this file revision
890 890
891 891 This method is different from linkrev because it takes into account the
892 892 changeset the filectx was created from. It ensures the returned
893 893 revision is one of its ancestors. This prevents bugs from
894 894 'linkrev-shadowing' when a file revision is used by multiple
895 895 changesets.
896 896 """
897 897 lkr = self.linkrev()
898 898 attrs = vars(self)
899 899 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
900 900 if noctx or self.rev() == lkr:
901 901 return self.linkrev()
902 902 return self._adjustlinkrev(self.rev(), inclusive=True)
903 903
904 904 def _parentfilectx(self, path, fileid, filelog):
905 905 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
906 906 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
907 907 if '_changeid' in vars(self) or '_changectx' in vars(self):
908 908 # If self is associated with a changeset (probably explicitly
909 909 # fed), ensure the created filectx is associated with a
910 910 # changeset that is an ancestor of self.changectx.
911 911 # This lets us later use _adjustlinkrev to get a correct link.
912 912 fctx._descendantrev = self.rev()
913 913 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
914 914 elif '_descendantrev' in vars(self):
915 915 # Otherwise propagate _descendantrev if we have one associated.
916 916 fctx._descendantrev = self._descendantrev
917 917 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
918 918 return fctx
919 919
920 920 def parents(self):
921 921 _path = self._path
922 922 fl = self._filelog
923 923 parents = self._filelog.parents(self._filenode)
924 924 pl = [(_path, node, fl) for node in parents if node != nullid]
925 925
926 926 r = fl.renamed(self._filenode)
927 927 if r:
928 928 # - In the simple rename case, both parents are nullid, pl is empty.
929 929 # - In case of merge, only one of the parents is nullid and should
930 930 # be replaced with the rename information. This parent is -always-
931 931 # the first one.
932 932 #
933 933 # As nullid parents have always been filtered out in the previous list
934 934 # comprehension, inserting at 0 will always result in replacing the
935 935 # first nullid parent with the rename information.
936 936 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
937 937
938 938 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
939 939
940 940 def p1(self):
941 941 return self.parents()[0]
942 942
943 943 def p2(self):
944 944 p = self.parents()
945 945 if len(p) == 2:
946 946 return p[1]
947 947 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
948 948
949 949 def annotate(self, follow=False, linenumber=False, skiprevs=None,
950 950 diffopts=None):
951 951 '''returns a list of tuples of ((ctx, number), line) for each line
952 952 in the file, where ctx is the filectx of the node where
953 953 that line was last changed; if the linenumber parameter is true, number is
954 954 the line number at its first appearance in the managed file; otherwise,
955 955 number has a fixed value of False.
956 956 '''
957 957
958 958 def lines(text):
959 959 if text.endswith("\n"):
960 960 return text.count("\n")
961 961 return text.count("\n") + int(bool(text))
962 962
963 963 if linenumber:
964 964 def decorate(text, rev):
965 965 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
966 966 else:
967 967 def decorate(text, rev):
968 968 return ([(rev, False)] * lines(text), text)
969 969
970 970 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
971 971
972 972 def parents(f):
973 973 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
974 974 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
975 975 # from the topmost introrev (= srcrev) down to p.linkrev() if it
976 976 # isn't an ancestor of the srcrev.
977 977 f._changeid
978 978 pl = f.parents()
979 979
980 980 # Don't return renamed parents if we aren't following.
981 981 if not follow:
982 982 pl = [p for p in pl if p.path() == f.path()]
983 983
984 984 # renamed filectx won't have a filelog yet, so set it
985 985 # from the cache to save time
986 986 for p in pl:
987 987 if not '_filelog' in p.__dict__:
988 988 p._filelog = getlog(p.path())
989 989
990 990 return pl
991 991
992 992 # use linkrev to find the first changeset where self appeared
993 993 base = self
994 994 introrev = self.introrev()
995 995 if self.rev() != introrev:
996 996 base = self.filectx(self.filenode(), changeid=introrev)
997 997 if getattr(base, '_ancestrycontext', None) is None:
998 998 cl = self._repo.changelog
999 999 if introrev is None:
1000 1000 # wctx is not inclusive, but works because _ancestrycontext
1001 1001 # is used to test filelog revisions
1002 1002 ac = cl.ancestors([p.rev() for p in base.parents()],
1003 1003 inclusive=True)
1004 1004 else:
1005 1005 ac = cl.ancestors([introrev], inclusive=True)
1006 1006 base._ancestrycontext = ac
1007 1007
1008 1008 # This algorithm would prefer to be recursive, but Python is a
1009 1009 # bit recursion-hostile. Instead we do an iterative
1010 1010 # depth-first search.
1011 1011
1012 1012 # 1st DFS pre-calculates pcache and needed
1013 1013 visit = [base]
1014 1014 pcache = {}
1015 1015 needed = {base: 1}
1016 1016 while visit:
1017 1017 f = visit.pop()
1018 1018 if f in pcache:
1019 1019 continue
1020 1020 pl = parents(f)
1021 1021 pcache[f] = pl
1022 1022 for p in pl:
1023 1023 needed[p] = needed.get(p, 0) + 1
1024 1024 if p not in pcache:
1025 1025 visit.append(p)
1026 1026
1027 1027 # 2nd DFS does the actual annotate
1028 1028 visit[:] = [base]
1029 1029 hist = {}
1030 1030 while visit:
1031 1031 f = visit[-1]
1032 1032 if f in hist:
1033 1033 visit.pop()
1034 1034 continue
1035 1035
1036 1036 ready = True
1037 1037 pl = pcache[f]
1038 1038 for p in pl:
1039 1039 if p not in hist:
1040 1040 ready = False
1041 1041 visit.append(p)
1042 1042 if ready:
1043 1043 visit.pop()
1044 1044 curr = decorate(f.data(), f)
1045 1045 skipchild = False
1046 1046 if skiprevs is not None:
1047 1047 skipchild = f._changeid in skiprevs
1048 1048 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1049 1049 diffopts)
1050 1050 for p in pl:
1051 1051 if needed[p] == 1:
1052 1052 del hist[p]
1053 1053 del needed[p]
1054 1054 else:
1055 1055 needed[p] -= 1
1056 1056
1057 1057 hist[f] = curr
1058 1058 del pcache[f]
1059 1059
1060 1060 return zip(hist[base][0], hist[base][1].splitlines(True))
1061 1061
1062 1062 def ancestors(self, followfirst=False):
1063 1063 visit = {}
1064 1064 c = self
1065 1065 if followfirst:
1066 1066 cut = 1
1067 1067 else:
1068 1068 cut = None
1069 1069
1070 1070 while True:
1071 1071 for parent in c.parents()[:cut]:
1072 1072 visit[(parent.linkrev(), parent.filenode())] = parent
1073 1073 if not visit:
1074 1074 break
1075 1075 c = visit.pop(max(visit))
1076 1076 yield c
1077 1077
1078 1078 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1079 1079 r'''
1080 1080 Given parent and child fctxes and annotate data for parents, for all lines
1081 1081 in either parent that match the child, annotate the child with the parent's
1082 1082 data.
1083 1083
1084 1084 Additionally, if `skipchild` is True, replace all other lines with parent
1085 1085 annotate data as well such that child is never blamed for any lines.
1086 1086
1087 1087 >>> oldfctx = 'old'
1088 1088 >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
1089 1089 >>> olddata = 'a\nb\n'
1090 1090 >>> p1data = 'a\nb\nc\n'
1091 1091 >>> p2data = 'a\nc\nd\n'
1092 1092 >>> childdata = 'a\nb2\nc\nc2\nd\n'
1093 1093 >>> diffopts = mdiff.diffopts()
1094 1094
1095 1095 >>> def decorate(text, rev):
1096 1096 ... return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)
1097 1097
1098 1098 Basic usage:
1099 1099
1100 1100 >>> oldann = decorate(olddata, oldfctx)
1101 1101 >>> p1ann = decorate(p1data, p1fctx)
1102 1102 >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
1103 1103 >>> p1ann[0]
1104 1104 [('old', 1), ('old', 2), ('p1', 3)]
1105 1105 >>> p2ann = decorate(p2data, p2fctx)
1106 1106 >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
1107 1107 >>> p2ann[0]
1108 1108 [('old', 1), ('p2', 2), ('p2', 3)]
1109 1109
1110 1110 Test with multiple parents (note the difference caused by ordering):
1111 1111
1112 1112 >>> childann = decorate(childdata, childfctx)
1113 1113 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
1114 1114 ... diffopts)
1115 1115 >>> childann[0]
1116 1116 [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]
1117 1117
1118 1118 >>> childann = decorate(childdata, childfctx)
1119 1119 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
1120 1120 ... diffopts)
1121 1121 >>> childann[0]
1122 1122 [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]
1123 1123
1124 1124 Test with skipchild (note the difference caused by ordering):
1125 1125
1126 1126 >>> childann = decorate(childdata, childfctx)
1127 1127 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
1128 1128 ... diffopts)
1129 1129 >>> childann[0]
1130 1130 [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]
1131 1131
1132 1132 >>> childann = decorate(childdata, childfctx)
1133 1133 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
1134 1134 ... diffopts)
1135 1135 >>> childann[0]
1136 1136 [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
1137 1137 '''
1138 1138 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1139 1139 for parent in parents]
1140 1140
1141 1141 if skipchild:
1142 1142 # Need to iterate over the blocks twice -- make it a list
1143 1143 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1144 1144 # Mercurial currently prefers p2 over p1 for annotate.
1145 1145 # TODO: change this?
1146 1146 for parent, blocks in pblocks:
1147 1147 for (a1, a2, b1, b2), t in blocks:
1148 1148 # Changed blocks ('!') or blocks made only of blank lines ('~')
1149 1149 # belong to the child.
1150 1150 if t == '=':
1151 1151 child[0][b1:b2] = parent[0][a1:a2]
1152 1152
1153 1153 if skipchild:
1154 1154 # Now try to match up anything that couldn't be matched.
1155 1155 # Reversing pblocks maintains bias towards p2, matching above
1156 1156 # behavior.
1157 1157 pblocks.reverse()
1158 1158
1159 1159 # The heuristics are:
1160 1160 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1161 1161 # This could potentially be smarter but works well enough.
1162 1162 # * For a non-matching section, do a best-effort fit. Match lines in
1163 1163 # diff hunks 1:1, dropping lines as necessary.
1164 1164 # * Repeat the last line as a last resort.
1165 1165
1166 1166 # First, replace as much as possible without repeating the last line.
1167 1167 remaining = [(parent, []) for parent, _blocks in pblocks]
1168 1168 for idx, (parent, blocks) in enumerate(pblocks):
1169 1169 for (a1, a2, b1, b2), _t in blocks:
1170 1170 if a2 - a1 >= b2 - b1:
1171 1171 for bk in xrange(b1, b2):
1172 1172 if child[0][bk][0] == childfctx:
1173 1173 ak = min(a1 + (bk - b1), a2 - 1)
1174 1174 child[0][bk] = parent[0][ak]
1175 1175 else:
1176 1176 remaining[idx][1].append((a1, a2, b1, b2))
1177 1177
1178 1178 # Then, look at anything left, which might involve repeating the last
1179 1179 # line.
1180 1180 for parent, blocks in remaining:
1181 1181 for a1, a2, b1, b2 in blocks:
1182 1182 for bk in xrange(b1, b2):
1183 1183 if child[0][bk][0] == childfctx:
1184 1184 ak = min(a1 + (bk - b1), a2 - 1)
1185 1185 child[0][bk] = parent[0][ak]
1186 1186 return child
1187 1187
1188 1188 class filectx(basefilectx):
1189 1189 """A filecontext object makes access to data related to a particular
1190 1190 filerevision convenient."""
1191 1191 def __init__(self, repo, path, changeid=None, fileid=None,
1192 1192 filelog=None, changectx=None):
1193 1193 """changeid can be a changeset revision, node, or tag.
1194 1194 fileid can be a file revision or node."""
1195 1195 self._repo = repo
1196 1196 self._path = path
1197 1197
1198 1198 assert (changeid is not None
1199 1199 or fileid is not None
1200 1200 or changectx is not None), \
1201 1201 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1202 1202 % (changeid, fileid, changectx))
1203 1203
1204 1204 if filelog is not None:
1205 1205 self._filelog = filelog
1206 1206
1207 1207 if changeid is not None:
1208 1208 self._changeid = changeid
1209 1209 if changectx is not None:
1210 1210 self._changectx = changectx
1211 1211 if fileid is not None:
1212 1212 self._fileid = fileid
1213 1213
1214 1214 @propertycache
1215 1215 def _changectx(self):
1216 1216 try:
1217 1217 return changectx(self._repo, self._changeid)
1218 1218 except error.FilteredRepoLookupError:
1219 1219 # Linkrev may point to any revision in the repository. When the
1220 1220 # repository is filtered this may lead to `filectx` trying to build
1221 1221 # `changectx` for a filtered revision. In such a case we fall back to
1222 1222 # creating `changectx` on the unfiltered version of the repository.
1223 1223 # This fallback should not be an issue because `changectx` from
1224 1224 # `filectx` are not used in complex operations that care about
1225 1225 # filtering.
1226 1226 #
1227 1227 # This fallback is a cheap and dirty fix that prevents several
1228 1228 # crashes. It does not ensure the behavior is correct. However the
1229 1229 # behavior was not correct before filtering either and "incorrect
1230 1230 # behavior" is seen as better than "crash".
1231 1231 #
1232 1232 # Linkrevs have several serious troubles with filtering that are
1233 1233 # complicated to solve. Proper handling of the issue here should be
1234 1234 # considered once solving the linkrev issues is on the table.
1235 1235 return changectx(self._repo.unfiltered(), self._changeid)
1236 1236
1237 1237 def filectx(self, fileid, changeid=None):
1238 1238 '''opens an arbitrary revision of the file without
1239 1239 opening a new filelog'''
1240 1240 return filectx(self._repo, self._path, fileid=fileid,
1241 1241 filelog=self._filelog, changeid=changeid)
1242 1242
1243 1243 def rawdata(self):
1244 1244 return self._filelog.revision(self._filenode, raw=True)
1245 1245
1246 1246 def rawflags(self):
1247 1247 """low-level revlog flags"""
1248 1248 return self._filelog.flags(self._filerev)
1249 1249
1250 1250 def data(self):
1251 1251 try:
1252 1252 return self._filelog.read(self._filenode)
1253 1253 except error.CensoredNodeError:
1254 1254 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1255 1255 return ""
1256 1256 raise error.Abort(_("censored node: %s") % short(self._filenode),
1257 1257 hint=_("set censor.policy to ignore errors"))
1258 1258
1259 1259 def size(self):
1260 1260 return self._filelog.size(self._filerev)
1261 1261
1262 1262 @propertycache
1263 1263 def _copied(self):
1264 1264 """check if file was actually renamed in this changeset revision
1265 1265
1266 1266 If a rename is logged in the file revision, we report the copy for the
1267 1267 changeset only if the file revision's linkrev points back to the changeset
1268 1268 in question or both changeset parents contain different file revisions.
1269 1269 """
1270 1270
1271 1271 renamed = self._filelog.renamed(self._filenode)
1272 1272 if not renamed:
1273 1273 return renamed
1274 1274
1275 1275 if self.rev() == self.linkrev():
1276 1276 return renamed
1277 1277
1278 1278 name = self.path()
1279 1279 fnode = self._filenode
1280 1280 for p in self._changectx.parents():
1281 1281 try:
1282 1282 if fnode == p.filenode(name):
1283 1283 return None
1284 1284 except error.LookupError:
1285 1285 pass
1286 1286 return renamed
1287 1287
1288 1288 def children(self):
1289 1289 # hard for renames
1290 1290 c = self._filelog.children(self._filenode)
1291 1291 return [filectx(self._repo, self._path, fileid=x,
1292 1292 filelog=self._filelog) for x in c]
1293 1293
1294 1294 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1295 1295 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1296 1296 if diff from fctx2 to fctx1 has changes in linerange2 and
1297 1297 `linerange1` is the new line range for fctx1.
1298 1298 """
1299 1299 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1300 1300 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1301 1301 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1302 1302 return diffinrange, linerange1
1303 1303
1304 1304 def blockancestors(fctx, fromline, toline, followfirst=False):
1305 1305 """Yield ancestors of `fctx` with respect to the block of lines within
1306 1306 `fromline`-`toline` range.
1307 1307 """
1308 1308 diffopts = patch.diffopts(fctx._repo.ui)
1309 1309 introrev = fctx.introrev()
1310 1310 if fctx.rev() != introrev:
1311 1311 fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
1312 1312 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1313 1313 while visit:
1314 1314 c, linerange2 = visit.pop(max(visit))
1315 1315 pl = c.parents()
1316 1316 if followfirst:
1317 1317 pl = pl[:1]
1318 1318 if not pl:
1319 1319 # The block originates from the initial revision.
1320 1320 yield c, linerange2
1321 1321 continue
1322 1322 inrange = False
1323 1323 for p in pl:
1324 1324 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1325 1325 inrange = inrange or inrangep
1326 1326 if linerange1[0] == linerange1[1]:
1327 1327 # Parent's linerange is empty, meaning that the block got
1328 1328 # introduced in this revision; no need to go further in this
1329 1329 # branch.
1330 1330 continue
1331 1331 # Set _descendantrev with 'c' (a known descendant) so that, when
1332 1332 # _adjustlinkrev is called for 'p', it receives this descendant
1333 1333 # (as srcrev) instead of the possibly topmost introrev.
1334 1334 p._descendantrev = c.rev()
1335 1335 visit[p.linkrev(), p.filenode()] = p, linerange1
1336 1336 if inrange:
1337 1337 yield c, linerange2
1338 1338
1339 1339 def blockdescendants(fctx, fromline, toline):
1340 1340 """Yield descendants of `fctx` with respect to the block of lines within
1341 1341 `fromline`-`toline` range.
1342 1342 """
1343 1343 # First possibly yield 'fctx' if it has changes in range with respect to
1344 1344 # its parents.
1345 1345 try:
1346 1346 c, linerange1 = next(blockancestors(fctx, fromline, toline))
1347 1347 except StopIteration:
1348 1348 pass
1349 1349 else:
1350 1350 if c == fctx:
1351 1351 yield c, linerange1
1352 1352
1353 1353 diffopts = patch.diffopts(fctx._repo.ui)
1354 1354 fl = fctx.filelog()
1355 1355 seen = {fctx.filerev(): (fctx, (fromline, toline))}
1356 1356 for i in fl.descendants([fctx.filerev()]):
1357 1357 c = fctx.filectx(i)
1358 1358 inrange = False
1359 1359 for x in fl.parentrevs(i):
1360 1360 try:
1361 1361 p, linerange2 = seen[x]
1362 1362 except KeyError:
1363 1363 # nullrev or other branch
1364 1364 continue
1365 1365 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
1366 1366 inrange = inrange or inrangep
1367 1367 # If revision 'i' has been seen (it's a merge), we assume that its
1368 1368 # line range is the same independently of which parent was used
1369 1369 # to compute it.
1370 1370 assert i not in seen or seen[i][1] == linerange1, (
1371 1371 'computed line range for %s is not consistent between '
1372 1372 'ancestor branches' % c)
1373 1373 seen[i] = c, linerange1
1374 1374 if inrange:
1375 1375 yield c, linerange1
1376 1376
1377 1377 class committablectx(basectx):
1378 1378 """A committablectx object provides common functionality for a context that
1379 1379 wants the ability to commit, e.g. workingctx or memctx."""
1380 1380 def __init__(self, repo, text="", user=None, date=None, extra=None,
1381 1381 changes=None):
1382 1382 self._repo = repo
1383 1383 self._rev = None
1384 1384 self._node = None
1385 1385 self._text = text
1386 1386 if date:
1387 1387 self._date = util.parsedate(date)
1388 1388 if user:
1389 1389 self._user = user
1390 1390 if changes:
1391 1391 self._status = changes
1392 1392
1393 1393 self._extra = {}
1394 1394 if extra:
1395 1395 self._extra = extra.copy()
1396 1396 if 'branch' not in self._extra:
1397 1397 try:
1398 1398 branch = encoding.fromlocal(self._repo.dirstate.branch())
1399 1399 except UnicodeDecodeError:
1400 1400 raise error.Abort(_('branch name not in UTF-8!'))
1401 1401 self._extra['branch'] = branch
1402 1402 if self._extra['branch'] == '':
1403 1403 self._extra['branch'] = 'default'
1404 1404
1405 1405 def __str__(self):
1406 return str(self._parents[0]) + "+"
1406 return str(self._parents[0]) + r"+"
1407 1407
1408 1408 def __nonzero__(self):
1409 1409 return True
1410 1410
1411 1411 __bool__ = __nonzero__
1412 1412
1413 1413 def _buildflagfunc(self):
1414 1414 # Create a fallback function for getting file flags when the
1415 1415 # filesystem doesn't support them
1416 1416
1417 1417 copiesget = self._repo.dirstate.copies().get
1418 1418 parents = self.parents()
1419 1419 if len(parents) < 2:
1420 1420 # when we have one parent, it's easy: copy from parent
1421 1421 man = parents[0].manifest()
1422 1422 def func(f):
1423 1423 f = copiesget(f, f)
1424 1424 return man.flags(f)
1425 1425 else:
1426 1426 # merges are tricky: we try to reconstruct the unstored
1427 1427 # result from the merge (issue1802)
1428 1428 p1, p2 = parents
1429 1429 pa = p1.ancestor(p2)
1430 1430 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1431 1431
1432 1432 def func(f):
1433 1433 f = copiesget(f, f) # may be wrong for merges with copies
1434 1434 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1435 1435 if fl1 == fl2:
1436 1436 return fl1
1437 1437 if fl1 == fla:
1438 1438 return fl2
1439 1439 if fl2 == fla:
1440 1440 return fl1
1441 1441 return '' # punt for conflicts
1442 1442
1443 1443 return func
1444 1444
1445 1445 @propertycache
1446 1446 def _flagfunc(self):
1447 1447 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1448 1448
1449 1449 @propertycache
1450 1450 def _status(self):
1451 1451 return self._repo.status()
1452 1452
1453 1453 @propertycache
1454 1454 def _user(self):
1455 1455 return self._repo.ui.username()
1456 1456
1457 1457 @propertycache
1458 1458 def _date(self):
1459 1459 ui = self._repo.ui
1460 1460 date = ui.configdate('devel', 'default-date')
1461 1461 if date is None:
1462 1462 date = util.makedate()
1463 1463 return date
1464 1464
1465 1465 def subrev(self, subpath):
1466 1466 return None
1467 1467
1468 1468 def manifestnode(self):
1469 1469 return None
1470 1470 def user(self):
1471 1471 return self._user or self._repo.ui.username()
1472 1472 def date(self):
1473 1473 return self._date
1474 1474 def description(self):
1475 1475 return self._text
1476 1476 def files(self):
1477 1477 return sorted(self._status.modified + self._status.added +
1478 1478 self._status.removed)
1479 1479
1480 1480 def modified(self):
1481 1481 return self._status.modified
1482 1482 def added(self):
1483 1483 return self._status.added
1484 1484 def removed(self):
1485 1485 return self._status.removed
1486 1486 def deleted(self):
1487 1487 return self._status.deleted
1488 1488 def branch(self):
1489 1489 return encoding.tolocal(self._extra['branch'])
1490 1490 def closesbranch(self):
1491 1491 return 'close' in self._extra
1492 1492 def extra(self):
1493 1493 return self._extra
1494 1494
1495 1495 def tags(self):
1496 1496 return []
1497 1497
1498 1498 def bookmarks(self):
1499 1499 b = []
1500 1500 for p in self.parents():
1501 1501 b.extend(p.bookmarks())
1502 1502 return b
1503 1503
1504 1504 def phase(self):
1505 1505 phase = phases.draft # default phase to draft
1506 1506 for p in self.parents():
1507 1507 phase = max(phase, p.phase())
1508 1508 return phase
1509 1509
1510 1510 def hidden(self):
1511 1511 return False
1512 1512
1513 1513 def children(self):
1514 1514 return []
1515 1515
1516 1516 def flags(self, path):
1517 1517 if r'_manifest' in self.__dict__:
1518 1518 try:
1519 1519 return self._manifest.flags(path)
1520 1520 except KeyError:
1521 1521 return ''
1522 1522
1523 1523 try:
1524 1524 return self._flagfunc(path)
1525 1525 except OSError:
1526 1526 return ''
1527 1527
1528 1528 def ancestor(self, c2):
1529 1529 """return the "best" ancestor context of self and c2"""
1530 1530 return self._parents[0].ancestor(c2) # punt on two parents for now
1531 1531
1532 1532 def walk(self, match):
1533 1533 '''Generates matching file names.'''
1534 1534 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1535 1535 True, False))
1536 1536
1537 1537 def matches(self, match):
1538 1538 return sorted(self._repo.dirstate.matches(match))
1539 1539
1540 1540 def ancestors(self):
1541 1541 for p in self._parents:
1542 1542 yield p
1543 1543 for a in self._repo.changelog.ancestors(
1544 1544 [p.rev() for p in self._parents]):
1545 1545 yield changectx(self._repo, a)
1546 1546
1547 1547 def markcommitted(self, node):
1548 1548 """Perform post-commit cleanup necessary after committing this ctx
1549 1549
1550 1550 Specifically, this updates backing stores this working context
1551 1551 wraps to reflect the fact that the changes reflected by this
1552 1552 workingctx have been committed. For example, it marks
1553 1553 modified and added files as normal in the dirstate.
1554 1554
1555 1555 """
1556 1556
1557 1557 with self._repo.dirstate.parentchange():
1558 1558 for f in self.modified() + self.added():
1559 1559 self._repo.dirstate.normal(f)
1560 1560 for f in self.removed():
1561 1561 self._repo.dirstate.drop(f)
1562 1562 self._repo.dirstate.setparents(node)
1563 1563
1564 1564 # write changes out explicitly, because nesting wlock at
1565 1565 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1566 1566 # from immediately doing so for subsequent changing files
1567 1567 self._repo.dirstate.write(self._repo.currenttransaction())
1568 1568
1569 1569 def dirty(self, missing=False, merge=True, branch=True):
1570 1570 return False
1571 1571
1572 1572 class workingctx(committablectx):
1573 1573 """A workingctx object makes access to data related to
1574 1574 the current working directory convenient.
1575 1575 date - any valid date string or (unixtime, offset), or None.
1576 1576 user - username string, or None.
1577 1577 extra - a dictionary of extra values, or None.
1578 1578 changes - a list of file lists as returned by localrepo.status()
1579 1579 or None to use the repository status.
1580 1580 """
1581 1581 def __init__(self, repo, text="", user=None, date=None, extra=None,
1582 1582 changes=None):
1583 1583 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1584 1584
1585 1585 def __iter__(self):
1586 1586 d = self._repo.dirstate
1587 1587 for f in d:
1588 1588 if d[f] != 'r':
1589 1589 yield f
1590 1590
1591 1591 def __contains__(self, key):
1592 1592 return self._repo.dirstate[key] not in "?r"
1593 1593
1594 1594 def hex(self):
1595 1595 return hex(wdirid)
1596 1596
1597 1597 @propertycache
1598 1598 def _parents(self):
1599 1599 p = self._repo.dirstate.parents()
1600 1600 if p[1] == nullid:
1601 1601 p = p[:-1]
1602 1602 return [changectx(self._repo, x) for x in p]
1603 1603
1604 1604 def filectx(self, path, filelog=None):
1605 1605 """get a file context from the working directory"""
1606 1606 return workingfilectx(self._repo, path, workingctx=self,
1607 1607 filelog=filelog)
1608 1608
1609 1609 def dirty(self, missing=False, merge=True, branch=True):
1610 1610 "check whether a working directory is modified"
1611 1611 # check subrepos first
1612 1612 for s in sorted(self.substate):
1613 1613 if self.sub(s).dirty():
1614 1614 return True
1615 1615 # check current working dir
1616 1616 return ((merge and self.p2()) or
1617 1617 (branch and self.branch() != self.p1().branch()) or
1618 1618 self.modified() or self.added() or self.removed() or
1619 1619 (missing and self.deleted()))
1620 1620
1621 1621 def add(self, list, prefix=""):
1622 1622 join = lambda f: os.path.join(prefix, f)
1623 1623 with self._repo.wlock():
1624 1624 ui, ds = self._repo.ui, self._repo.dirstate
1625 1625 rejected = []
1626 1626 lstat = self._repo.wvfs.lstat
1627 1627 for f in list:
1628 1628 scmutil.checkportable(ui, join(f))
1629 1629 try:
1630 1630 st = lstat(f)
1631 1631 except OSError:
1632 1632 ui.warn(_("%s does not exist!\n") % join(f))
1633 1633 rejected.append(f)
1634 1634 continue
1635 1635 if st.st_size > 10000000:
1636 1636 ui.warn(_("%s: up to %d MB of RAM may be required "
1637 1637 "to manage this file\n"
1638 1638 "(use 'hg revert %s' to cancel the "
1639 1639 "pending addition)\n")
1640 1640 % (f, 3 * st.st_size // 1000000, join(f)))
1641 1641 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1642 1642 ui.warn(_("%s not added: only files and symlinks "
1643 1643 "supported currently\n") % join(f))
1644 1644 rejected.append(f)
1645 1645 elif ds[f] in 'amn':
1646 1646 ui.warn(_("%s already tracked!\n") % join(f))
1647 1647 elif ds[f] == 'r':
1648 1648 ds.normallookup(f)
1649 1649 else:
1650 1650 ds.add(f)
1651 1651 return rejected
1652 1652
1653 1653 def forget(self, files, prefix=""):
1654 1654 join = lambda f: os.path.join(prefix, f)
1655 1655 with self._repo.wlock():
1656 1656 rejected = []
1657 1657 for f in files:
1658 1658 if f not in self._repo.dirstate:
1659 1659 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1660 1660 rejected.append(f)
1661 1661 elif self._repo.dirstate[f] != 'a':
1662 1662 self._repo.dirstate.remove(f)
1663 1663 else:
1664 1664 self._repo.dirstate.drop(f)
1665 1665 return rejected
1666 1666
1667 1667 def undelete(self, list):
1668 1668 pctxs = self.parents()
1669 1669 with self._repo.wlock():
1670 1670 for f in list:
1671 1671 if self._repo.dirstate[f] != 'r':
1672 1672 self._repo.ui.warn(_("%s not removed!\n") % f)
1673 1673 else:
1674 1674 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1675 1675 t = fctx.data()
1676 1676 self._repo.wwrite(f, t, fctx.flags())
1677 1677 self._repo.dirstate.normal(f)
1678 1678
1679 1679 def copy(self, source, dest):
1680 1680 try:
1681 1681 st = self._repo.wvfs.lstat(dest)
1682 1682 except OSError as err:
1683 1683 if err.errno != errno.ENOENT:
1684 1684 raise
1685 1685 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1686 1686 return
1687 1687 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1688 1688 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1689 1689 "symbolic link\n") % dest)
1690 1690 else:
1691 1691 with self._repo.wlock():
1692 1692 if self._repo.dirstate[dest] in '?':
1693 1693 self._repo.dirstate.add(dest)
1694 1694 elif self._repo.dirstate[dest] in 'r':
1695 1695 self._repo.dirstate.normallookup(dest)
1696 1696 self._repo.dirstate.copy(source, dest)
1697 1697
1698 1698 def match(self, pats=None, include=None, exclude=None, default='glob',
1699 1699 listsubrepos=False, badfn=None):
1700 1700 if pats is None:
1701 1701 pats = []
1702 1702 r = self._repo
1703 1703
1704 1704 # Only a case-insensitive filesystem needs magic to translate user input
1705 1705 # to actual case in the filesystem.
1706 1706 icasefs = not util.fscasesensitive(r.root)
1707 1707 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1708 1708 default, auditor=r.auditor, ctx=self,
1709 1709 listsubrepos=listsubrepos, badfn=badfn,
1710 1710 icasefs=icasefs)
1711 1711
1712 1712 def _filtersuspectsymlink(self, files):
1713 1713 if not files or self._repo.dirstate._checklink:
1714 1714 return files
1715 1715
1716 1716 # Symlink placeholders may get non-symlink-like contents
1717 1717 # via user error or dereferencing by NFS or Samba servers,
1718 1718 # so we filter out any placeholders that don't look like a
1719 1719 # symlink
1720 1720 sane = []
1721 1721 for f in files:
1722 1722 if self.flags(f) == 'l':
1723 1723 d = self[f].data()
1724 1724 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1725 1725 self._repo.ui.debug('ignoring suspect symlink placeholder'
1726 1726 ' "%s"\n' % f)
1727 1727 continue
1728 1728 sane.append(f)
1729 1729 return sane
1730 1730
1731 1731 def _checklookup(self, files):
1732 1732 # check for any possibly clean files
1733 1733 if not files:
1734 1734 return [], []
1735 1735
1736 1736 modified = []
1737 1737 fixup = []
1738 1738 pctx = self._parents[0]
1739 1739 # do a full compare of any files that might have changed
1740 1740 for f in sorted(files):
1741 1741 if (f not in pctx or self.flags(f) != pctx.flags(f)
1742 1742 or pctx[f].cmp(self[f])):
1743 1743 modified.append(f)
1744 1744 else:
1745 1745 fixup.append(f)
1746 1746
1747 1747 # update dirstate for files that are actually clean
1748 1748 if fixup:
1749 1749 try:
1750 1750 # updating the dirstate is optional
1751 1751 # so we don't wait on the lock
1752 1752 # wlock can invalidate the dirstate, so cache normal _after_
1753 1753 # taking the lock
1754 1754 with self._repo.wlock(False):
1755 1755 normal = self._repo.dirstate.normal
1756 1756 for f in fixup:
1757 1757 normal(f)
1758 1758 # write changes out explicitly, because nesting
1759 1759 # wlock at runtime may prevent 'wlock.release()'
1760 1760 # after this block from doing so for subsequent
1761 1761 # changing files
1762 1762 self._repo.dirstate.write(self._repo.currenttransaction())
1763 1763 except error.LockError:
1764 1764 pass
1765 1765 return modified, fixup
1766 1766
1767 1767 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1768 1768 unknown=False):
1769 1769 '''Gets the status from the dirstate -- internal use only.'''
1770 1770 listignored, listclean, listunknown = ignored, clean, unknown
1771 1771 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1772 1772 subrepos = []
1773 1773 if '.hgsub' in self:
1774 1774 subrepos = sorted(self.substate)
1775 1775 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1776 1776 listclean, listunknown)
1777 1777
1778 1778 # check for any possibly clean files
1779 1779 if cmp:
1780 1780 modified2, fixup = self._checklookup(cmp)
1781 1781 s.modified.extend(modified2)
1782 1782
1783 1783 # update dirstate for files that are actually clean
1784 1784 if fixup and listclean:
1785 1785 s.clean.extend(fixup)
1786 1786
1787 1787 if match.always():
1788 1788 # cache for performance
1789 1789 if s.unknown or s.ignored or s.clean:
1790 1790 # "_status" is cached with list*=False in the normal route
1791 1791 self._status = scmutil.status(s.modified, s.added, s.removed,
1792 1792 s.deleted, [], [], [])
1793 1793 else:
1794 1794 self._status = s
1795 1795
1796 1796 return s
1797 1797
1798 1798 @propertycache
1799 1799 def _manifest(self):
1800 1800 """generate a manifest corresponding to the values in self._status
1801 1801
1802 1802 This reuses the file nodeids from the parent, but we use special node
1803 1803 identifiers for added and modified files. This is used by manifest
1804 1804 merge to see that files are different and by update logic to avoid
1805 1805 deleting newly added files.
1806 1806 """
1807 1807 return self._buildstatusmanifest(self._status)
1808 1808
1809 1809 def _buildstatusmanifest(self, status):
1810 1810 """Builds a manifest that includes the given status results."""
1811 1811 parents = self.parents()
1812 1812
1813 1813 man = parents[0].manifest().copy()
1814 1814
1815 1815 ff = self._flagfunc
1816 1816 for i, l in ((addednodeid, status.added),
1817 1817 (modifiednodeid, status.modified)):
1818 1818 for f in l:
1819 1819 man[f] = i
1820 1820 try:
1821 1821 man.setflag(f, ff(f))
1822 1822 except OSError:
1823 1823 pass
1824 1824
1825 1825 for f in status.deleted + status.removed:
1826 1826 if f in man:
1827 1827 del man[f]
1828 1828
1829 1829 return man
1830 1830
1831 1831 def _buildstatus(self, other, s, match, listignored, listclean,
1832 1832 listunknown):
1833 1833 """build a status with respect to another context
1834 1834
1835 1835 This includes logic for maintaining the fast path of status when
1836 1836 comparing the working directory against its parent: building a new
1837 1837 manifest is skipped when self (the working directory) is compared
1838 1838 against its parent (repo['.']).
1839 1839 """
1840 1840 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1841 1841 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1842 1842 # might have accidentally ended up with the entire contents of the file
1843 1843 # they are supposed to be linking to.
1844 1844 s.modified[:] = self._filtersuspectsymlink(s.modified)
1845 1845 if other != self._repo['.']:
1846 1846 s = super(workingctx, self)._buildstatus(other, s, match,
1847 1847 listignored, listclean,
1848 1848 listunknown)
1849 1849 return s
1850 1850
1851 1851 def _matchstatus(self, other, match):
1852 1852 """override the match method with a filter for directory patterns
1853 1853
1854 1854 We use inheritance to customize the match.bad method only for
1855 1855 workingctx, since it applies only to the working directory when
1856 1856 comparing against the parent changeset.
1857 1857
1858 1858 If we aren't comparing against the working directory's parent, then we
1859 1859 just use the default match object sent to us.
1860 1860 """
1861 1861 superself = super(workingctx, self)
1862 1862 match = superself._matchstatus(other, match)
1863 1863 if other != self._repo['.']:
1864 1864 def bad(f, msg):
1865 1865 # 'f' may be a directory pattern from 'match.files()',
1866 1866 # so 'f not in ctx1' is not enough
1867 1867 if f not in other and not other.hasdir(f):
1868 1868 self._repo.ui.warn('%s: %s\n' %
1869 1869 (self._repo.dirstate.pathto(f), msg))
1870 1870 match.bad = bad
1871 1871 return match
1872 1872
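# Illustrative sketch (editorial addition): typical read-only use of the
# workingctx defined above. It assumes `repo` is a localrepo and `pats` a
# list of patterns; repo[None] is the usual way to obtain the working
# directory context rather than instantiating workingctx directly.
def _example_workingctx(repo, pats):
    wctx = repo[None]
    if wctx.dirty(missing=True):           # deleted files count as dirty
        m = wctx.match(pats=pats)          # honours case-insensitive filesystems
        for f in wctx.walk(m):             # sorted matching file names
            repo.ui.write("%s\n" % f)
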
1873 1873 class committablefilectx(basefilectx):
1874 1874 """A committablefilectx provides common functionality for a file context
1875 1875 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1876 1876 def __init__(self, repo, path, filelog=None, ctx=None):
1877 1877 self._repo = repo
1878 1878 self._path = path
1879 1879 self._changeid = None
1880 1880 self._filerev = self._filenode = None
1881 1881
1882 1882 if filelog is not None:
1883 1883 self._filelog = filelog
1884 1884 if ctx:
1885 1885 self._changectx = ctx
1886 1886
1887 1887 def __nonzero__(self):
1888 1888 return True
1889 1889
1890 1890 __bool__ = __nonzero__
1891 1891
1892 1892 def linkrev(self):
1893 1893 # linked to self._changectx no matter if file is modified or not
1894 1894 return self.rev()
1895 1895
1896 1896 def parents(self):
1897 1897 '''return parent filectxs, following copies if necessary'''
1898 1898 def filenode(ctx, path):
1899 1899 return ctx._manifest.get(path, nullid)
1900 1900
1901 1901 path = self._path
1902 1902 fl = self._filelog
1903 1903 pcl = self._changectx._parents
1904 1904 renamed = self.renamed()
1905 1905
1906 1906 if renamed:
1907 1907 pl = [renamed + (None,)]
1908 1908 else:
1909 1909 pl = [(path, filenode(pcl[0], path), fl)]
1910 1910
1911 1911 for pc in pcl[1:]:
1912 1912 pl.append((path, filenode(pc, path), fl))
1913 1913
1914 1914 return [self._parentfilectx(p, fileid=n, filelog=l)
1915 1915 for p, n, l in pl if n != nullid]
1916 1916
1917 1917 def children(self):
1918 1918 return []
1919 1919
1920 1920 class workingfilectx(committablefilectx):
1921 1921 """A workingfilectx object makes access to data related to a particular
1922 1922 file in the working directory convenient."""
1923 1923 def __init__(self, repo, path, filelog=None, workingctx=None):
1924 1924 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1925 1925
1926 1926 @propertycache
1927 1927 def _changectx(self):
1928 1928 return workingctx(self._repo)
1929 1929
1930 1930 def data(self):
1931 1931 return self._repo.wread(self._path)
1932 1932 def renamed(self):
1933 1933 rp = self._repo.dirstate.copied(self._path)
1934 1934 if not rp:
1935 1935 return None
1936 1936 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1937 1937
1938 1938 def size(self):
1939 1939 return self._repo.wvfs.lstat(self._path).st_size
1940 1940 def date(self):
1941 1941 t, tz = self._changectx.date()
1942 1942 try:
1943 1943 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1944 1944 except OSError as err:
1945 1945 if err.errno != errno.ENOENT:
1946 1946 raise
1947 1947 return (t, tz)
1948 1948
1949 1949 def cmp(self, fctx):
1950 1950 """compare with other file context
1951 1951
1952 1952 returns True if different from fctx.
1953 1953 """
1954 1954 # fctx should be a filectx (not a workingfilectx)
1955 1955 # invert comparison to reuse the same code path
1956 1956 return fctx.cmp(self)
1957 1957
1958 1958 def remove(self, ignoremissing=False):
1959 1959 """wraps unlink for a repo's working directory"""
1960 1960 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1961 1961
1962 1962 def write(self, data, flags):
1963 1963 """wraps repo.wwrite"""
1964 1964 self._repo.wwrite(self._path, data, flags)
1965 1965
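# Illustrative sketch (editorial addition): reading and rewriting a file
# through the workingfilectx defined above. `repo` and `path` are assumed
# inputs; the context is normally obtained via repo[None].filectx(path).
def _example_workingfilectx(repo, path):
    fctx = repo[None].filectx(path)
    data = fctx.data()                     # contents read from the working dir
    if '\r\n' in data:
        # normalize line endings, keeping the existing exec/symlink flags
        fctx.write(data.replace('\r\n', '\n'), fctx.flags())
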
1966 1966 class workingcommitctx(workingctx):
1967 1967 """A workingcommitctx object makes access to data related to
1968 1968 the revision being committed convenient.
1969 1969
1970 1970 This hides changes in the working directory, if they aren't
1971 1971 committed in this context.
1972 1972 """
1973 1973 def __init__(self, repo, changes,
1974 1974 text="", user=None, date=None, extra=None):
1975 1975 super(workingctx, self).__init__(repo, text, user, date, extra,
1976 1976 changes)
1977 1977
1978 1978 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1979 1979 unknown=False):
1980 1980 """Return matched files only in ``self._status``
1981 1981
1982 1982 Uncommitted files appear "clean" via this context, even if
1983 1983 they aren't actually so in the working directory.
1984 1984 """
1985 1985 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1986 1986 if clean:
1987 1987 clean = [f for f in self._manifest if f not in self._changedset]
1988 1988 else:
1989 1989 clean = []
1990 1990 return scmutil.status([f for f in self._status.modified if match(f)],
1991 1991 [f for f in self._status.added if match(f)],
1992 1992 [f for f in self._status.removed if match(f)],
1993 1993 [], [], [], clean)
1994 1994
1995 1995 @propertycache
1996 1996 def _changedset(self):
1997 1997 """Return the set of files changed in this context
1998 1998 """
1999 1999 changed = set(self._status.modified)
2000 2000 changed.update(self._status.added)
2001 2001 changed.update(self._status.removed)
2002 2002 return changed
2003 2003
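# Illustrative sketch (editorial addition): how a partial commit could use
# the workingcommitctx defined above so files outside the selection appear
# clean. `status` is assumed to be a scmutil.status restricted to the files
# actually being committed, mirroring what localrepo.commit passes in.
def _example_workingcommitctx(repo, status, text, user):
    ctx = workingcommitctx(repo, status, text=text, user=user)
    # only the files named in `status` show up as changed here
    return ctx.modified() + ctx.added()
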
2004 2004 def makecachingfilectxfn(func):
2005 2005 """Create a filectxfn that caches based on the path.
2006 2006
2007 2007 We can't use util.cachefunc because it uses all arguments as the cache
2008 2008 key and this creates a cycle since the arguments include the repo and
2009 2009 memctx.
2010 2010 """
2011 2011 cache = {}
2012 2012
2013 2013 def getfilectx(repo, memctx, path):
2014 2014 if path not in cache:
2015 2015 cache[path] = func(repo, memctx, path)
2016 2016 return cache[path]
2017 2017
2018 2018 return getfilectx
2019 2019
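# Illustrative sketch (editorial addition): wrapping a filectxfn with
# makecachingfilectxfn above so repeated lookups of the same path reuse the
# first result. `sourcedata` (a dict mapping path to file content) is an
# assumed stand-in for whatever expensive backend a real filectxfn queries.
def _example_cachingfilectxfn(sourcedata):
    def getfilectx(repo, memctx, path):
        data = sourcedata[path]            # pretend this lookup is expensive
        return memfilectx(repo, path, data, memctx=memctx)
    return makecachingfilectxfn(getfilectx)
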
2020 2020 class memctx(committablectx):
2021 2021 """Use memctx to perform in-memory commits via localrepo.commitctx().
2022 2022
2023 2023 Revision information is supplied at initialization time, while
2024 2024 related file data is made available through a callback
2025 2025 mechanism. 'repo' is the current localrepo, 'parents' is a
2026 2026 sequence of two parent revision identifiers (pass None for every
2027 2027 missing parent), 'text' is the commit message and 'files' lists
2028 2028 names of files touched by the revision (normalized and relative to
2029 2029 repository root).
2030 2030
2031 2031 filectxfn(repo, memctx, path) is a callable receiving the
2032 2032 repository, the current memctx object and the normalized path of
2033 2033 requested file, relative to repository root. It is fired by the
2034 2034 commit function for every file in 'files', but calls order is
2035 2035 undefined. If the file is available in the revision being
2036 2036 committed (updated or added), filectxfn returns a memfilectx
2037 2037 object. If the file was removed, filectxfn return None for recent
2038 2038 Mercurial. Moved files are represented by marking the source file
2039 2039 removed and the new file added with copy information (see
2040 2040 memfilectx).
2041 2041
2042 2042 user receives the committer name and defaults to current
2043 2043 repository username, date is the commit date in any format
2044 2044 supported by util.parsedate() and defaults to current date, extra
2045 2045 is a dictionary of metadata or is left empty.
2046 2046 """
2047 2047
2048 2048 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2049 2049 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2050 2050 # this field to determine what to do in filectxfn.
2051 2051 _returnnoneformissingfiles = True
2052 2052
2053 2053 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2054 2054 date=None, extra=None, editor=False):
2055 2055 super(memctx, self).__init__(repo, text, user, date, extra)
2056 2056 self._rev = None
2057 2057 self._node = None
2058 2058 parents = [(p or nullid) for p in parents]
2059 2059 p1, p2 = parents
2060 2060 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2061 2061 files = sorted(set(files))
2062 2062 self._files = files
2063 2063 self.substate = {}
2064 2064
2065 2065 # if store is not callable, wrap it in a function
2066 2066 if not callable(filectxfn):
2067 2067 def getfilectx(repo, memctx, path):
2068 2068 fctx = filectxfn[path]
2069 2069 # this is weird but apparently we only keep track of one parent
2070 2070 # (why not only store that instead of a tuple?)
2071 2071 copied = fctx.renamed()
2072 2072 if copied:
2073 2073 copied = copied[0]
2074 2074 return memfilectx(repo, path, fctx.data(),
2075 2075 islink=fctx.islink(), isexec=fctx.isexec(),
2076 2076 copied=copied, memctx=memctx)
2077 2077 self._filectxfn = getfilectx
2078 2078 else:
2079 2079 # memoizing increases performance for e.g. vcs convert scenarios.
2080 2080 self._filectxfn = makecachingfilectxfn(filectxfn)
2081 2081
2082 2082 if editor:
2083 2083 self._text = editor(self._repo, self, [])
2084 2084 self._repo.savecommitmessage(self._text)
2085 2085
2086 2086 def filectx(self, path, filelog=None):
2087 2087 """get a file context from the working directory
2088 2088
2089 2089 Returns None if file doesn't exist and should be removed."""
2090 2090 return self._filectxfn(self._repo, self, path)
2091 2091
2092 2092 def commit(self):
2093 2093 """commit context to the repo"""
2094 2094 return self._repo.commitctx(self)
2095 2095
2096 2096 @propertycache
2097 2097 def _manifest(self):
2098 2098 """generate a manifest based on the return values of filectxfn"""
2099 2099
2100 2100 # keep this simple for now; just worry about p1
2101 2101 pctx = self._parents[0]
2102 2102 man = pctx.manifest().copy()
2103 2103
2104 2104 for f in self._status.modified:
2105 2105 p1node = nullid
2106 2106 p2node = nullid
2107 2107 p = pctx[f].parents() # if file isn't in pctx, check p2?
2108 2108 if len(p) > 0:
2109 2109 p1node = p[0].filenode()
2110 2110 if len(p) > 1:
2111 2111 p2node = p[1].filenode()
2112 2112 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2113 2113
2114 2114 for f in self._status.added:
2115 2115 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2116 2116
2117 2117 for f in self._status.removed:
2118 2118 if f in man:
2119 2119 del man[f]
2120 2120
2121 2121 return man
2122 2122
2123 2123 @propertycache
2124 2124 def _status(self):
2125 2125 """Calculate exact status from ``files`` specified at construction
2126 2126 """
2127 2127 man1 = self.p1().manifest()
2128 2128 p2 = self._parents[1]
2129 2129 # "1 < len(self._parents)" can't be used for checking
2130 2130 # existence of the 2nd parent, because "memctx._parents" is
2131 2131 # explicitly initialized as a list whose length is always 2.
2132 2132 if p2.node() != nullid:
2133 2133 man2 = p2.manifest()
2134 2134 managing = lambda f: f in man1 or f in man2
2135 2135 else:
2136 2136 managing = lambda f: f in man1
2137 2137
2138 2138 modified, added, removed = [], [], []
2139 2139 for f in self._files:
2140 2140 if not managing(f):
2141 2141 added.append(f)
2142 2142 elif self[f]:
2143 2143 modified.append(f)
2144 2144 else:
2145 2145 removed.append(f)
2146 2146
2147 2147 return scmutil.status(modified, added, removed, [], [], [], [])
2148 2148
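# Illustrative sketch (editorial addition): a minimal in-memory commit using
# the memctx API described above, roughly what conversion tools do. It
# assumes `repo` is a locked localrepo and that one file is being added or
# updated on top of the current parent; transaction and error handling are
# omitted.
def _example_memctx_commit(repo, path, data, text, user):
    def getfilectx(repo, memctx, f):
        if f != path:
            return None                    # report any other file as removed
        return memfilectx(repo, f, data, memctx=memctx)
    p1 = repo['.'].node()
    ctx = memctx(repo, (p1, None), text, [path], getfilectx, user=user)
    return repo.commitctx(ctx)
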
2149 2149 class memfilectx(committablefilectx):
2150 2150 """memfilectx represents an in-memory file to commit.
2151 2151
2152 2152 See memctx and committablefilectx for more details.
2153 2153 """
2154 2154 def __init__(self, repo, path, data, islink=False,
2155 2155 isexec=False, copied=None, memctx=None):
2156 2156 """
2157 2157 path is the normalized file path relative to repository root.
2158 2158 data is the file content as a string.
2159 2159 islink is True if the file is a symbolic link.
2160 2160 isexec is True if the file is executable.
2161 2161 copied is the source file path if current file was copied in the
2162 2162 revision being committed, or None."""
2163 2163 super(memfilectx, self).__init__(repo, path, None, memctx)
2164 2164 self._data = data
2165 2165 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2166 2166 self._copied = None
2167 2167 if copied:
2168 2168 self._copied = (copied, nullid)
2169 2169
2170 2170 def data(self):
2171 2171 return self._data
2172 2172
2173 2173 def remove(self, ignoremissing=False):
2174 2174 """wraps unlink for a repo's working directory"""
2175 2175 # need to figure out what to do here
2176 2176 del self._changectx[self._path]
2177 2177
2178 2178 def write(self, data, flags):
2179 2179 """wraps repo.wwrite"""
2180 2180 self._data = data
2181 2181
2182 2182 class overlayfilectx(committablefilectx):
2183 2183 """Like memfilectx but take an original filectx and optional parameters to
2184 2184 override parts of it. This is useful when fctx.data() is expensive (i.e.
2185 2185 flag processor is expensive) and raw data, flags, and filenode could be
2186 2186 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2187 2187 """
2188 2188
2189 2189 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2190 2190 copied=None, ctx=None):
2191 2191 """originalfctx: filecontext to duplicate
2192 2192
2193 2193 datafunc: None or a function to override data (file content). It is a
2194 2194 function so the data is computed lazily. path, flags, copied, ctx: None or overridden value
2195 2195
2196 2196 copied could be (path, rev), or False. copied could also be just path,
2197 2197 and will be converted to (path, nullid). This simplifies some callers.
2198 2198 """
2199 2199
2200 2200 if path is None:
2201 2201 path = originalfctx.path()
2202 2202 if ctx is None:
2203 2203 ctx = originalfctx.changectx()
2204 2204 ctxmatch = lambda: True
2205 2205 else:
2206 2206 ctxmatch = lambda: ctx == originalfctx.changectx()
2207 2207
2208 2208 repo = originalfctx.repo()
2209 2209 flog = originalfctx.filelog()
2210 2210 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2211 2211
2212 2212 if copied is None:
2213 2213 copied = originalfctx.renamed()
2214 2214 copiedmatch = lambda: True
2215 2215 else:
2216 2216 if copied and not isinstance(copied, tuple):
2217 2217 # repo._filecommit will recalculate copyrev so nullid is okay
2218 2218 copied = (copied, nullid)
2219 2219 copiedmatch = lambda: copied == originalfctx.renamed()
2220 2220
2221 2221 # When data, copied (could affect data), ctx (could affect filelog
2222 2222 # parents) are not overridden, rawdata, rawflags, and filenode may be
2223 2223 # reused (repo._filecommit should double check filelog parents).
2224 2224 #
2225 2225 # path, flags are not hashed in filelog (but in manifestlog) so they do
2226 2226 # not affect reusability here.
2227 2227 #
2228 2228 # If ctx or copied is overridden to the same value as in originalfctx,
2229 2229 # it is still considered reusable. originalfctx.renamed() may be a bit
2230 2230 # expensive so it's not called unless necessary. Assuming datafunc is
2231 2231 # always expensive, do not call it for this "reusable" test.
2232 2232 reusable = datafunc is None and ctxmatch() and copiedmatch()
2233 2233
2234 2234 if datafunc is None:
2235 2235 datafunc = originalfctx.data
2236 2236 if flags is None:
2237 2237 flags = originalfctx.flags()
2238 2238
2239 2239 self._datafunc = datafunc
2240 2240 self._flags = flags
2241 2241 self._copied = copied
2242 2242
2243 2243 if reusable:
2244 2244 # copy extra fields from originalfctx
2245 2245 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2246 2246 for attr in attrs:
2247 2247 if util.safehasattr(originalfctx, attr):
2248 2248 setattr(self, attr, getattr(originalfctx, attr))
2249 2249
2250 2250 def data(self):
2251 2251 return self._datafunc()
2252 2252
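# Illustrative sketch (editorial addition): using the overlayfilectx above to
# reuse an existing file revision while overriding only its flags (the
# mode-only-amend case mentioned in the class docstring). `fctx` is an
# assumed input; since neither data nor copy information is overridden, the
# raw data and filenode of the original can be reused.
def _example_overlay_exec(fctx):
    return overlayfilectx(fctx, flags='x')  # mark the file executable
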
2253 2253 class metadataonlyctx(committablectx):
2254 2254 """Like memctx but it's reusing the manifest of different commit.
2255 2255 Intended to be used by lightweight operations that are creating
2256 2256 metadata-only changes.
2257 2257
2258 2258 Revision information is supplied at initialization time. 'repo' is the
2259 2259 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2260 2260 'parents' is a sequence of two parent revision identifiers (pass None for
2261 2261 every missing parent), 'text' is the commit message.
2262 2262
2263 2263 user receives the committer name and defaults to current repository
2264 2264 username, date is the commit date in any format supported by
2265 2265 util.parsedate() and defaults to current date, extra is a dictionary of
2266 2266 metadata or is left empty.
2267 2267 """
2268 2268 def __new__(cls, repo, originalctx, *args, **kwargs):
2269 2269 return super(metadataonlyctx, cls).__new__(cls, repo)
2270 2270
2271 2271 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2272 2272 extra=None, editor=False):
2273 2273 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2274 2274 self._rev = None
2275 2275 self._node = None
2276 2276 self._originalctx = originalctx
2277 2277 self._manifestnode = originalctx.manifestnode()
2278 2278 parents = [(p or nullid) for p in parents]
2279 2279 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2280 2280
2281 2281 # sanity check to ensure that the reused manifest parents are
2282 2282 # manifests of our commit parents
2283 2283 mp1, mp2 = self.manifestctx().parents
2284 2284 if p1 != nullid and p1.manifestnode() != mp1:
2285 2285 raise RuntimeError('can\'t reuse the manifest: '
2286 2286 'its p1 doesn\'t match the new ctx p1')
2287 2287 if p2 != nullid and p2.manifestnode() != mp2:
2288 2288 raise RuntimeError('can\'t reuse the manifest: '
2289 2289 'its p2 doesn\'t match the new ctx p2')
2290 2290
2291 2291 self._files = originalctx.files()
2292 2292 self.substate = {}
2293 2293
2294 2294 if editor:
2295 2295 self._text = editor(self._repo, self, [])
2296 2296 self._repo.savecommitmessage(self._text)
2297 2297
2298 2298 def manifestnode(self):
2299 2299 return self._manifestnode
2300 2300
2301 2301 @property
2302 2302 def _manifestctx(self):
2303 2303 return self._repo.manifestlog[self._manifestnode]
2304 2304
2305 2305 def filectx(self, path, filelog=None):
2306 2306 return self._originalctx.filectx(path, filelog=filelog)
2307 2307
2308 2308 def commit(self):
2309 2309 """commit context to the repo"""
2310 2310 return self._repo.commitctx(self)
2311 2311
2312 2312 @property
2313 2313 def _manifest(self):
2314 2314 return self._originalctx.manifest()
2315 2315
2316 2316 @propertycache
2317 2317 def _status(self):
2318 2318 """Calculate exact status from ``files`` specified in the ``origctx``
2319 2319 and parents manifests.
2320 2320 """
2321 2321 man1 = self.p1().manifest()
2322 2322 p2 = self._parents[1]
2323 2323 # "1 < len(self._parents)" can't be used for checking
2324 2324 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2325 2325 # explicitly initialized as a list whose length is always 2.
2326 2326 if p2.node() != nullid:
2327 2327 man2 = p2.manifest()
2328 2328 managing = lambda f: f in man1 or f in man2
2329 2329 else:
2330 2330 managing = lambda f: f in man1
2331 2331
2332 2332 modified, added, removed = [], [], []
2333 2333 for f in self._files:
2334 2334 if not managing(f):
2335 2335 added.append(f)
2336 2336 elif self[f]:
2337 2337 modified.append(f)
2338 2338 else:
2339 2339 removed.append(f)
2340 2340
2341 2341 return scmutil.status(modified, added, removed, [], [], [], [])
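
# Illustrative sketch (editorial addition): a metadata-only rewrite of an
# existing changeset using the metadataonlyctx API above, reusing its
# manifest while changing only the commit message. `repo`, `ctx` and
# `newtext` are assumed inputs; a real history-editing caller would also
# take locks and handle obsolescence markers or stripping of the original.
def _example_reword(repo, ctx, newtext):
    parents = (ctx.p1().node(), ctx.p2().node())
    new = metadataonlyctx(repo, ctx, parents, newtext,
                          user=ctx.user(), date=ctx.date(),
                          extra=ctx.extra())
    return repo.commitctx(new)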