visibility: make the filtered message translatable...
Boris Feld
r35627:c0265474 default
@@ -1,2790 +1,2789 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirnodes,
27 27 wdirrev,
28 28 )
29 29 from .thirdparty import (
30 30 attr,
31 31 )
32 32 from . import (
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 mdiff,
38 38 obsolete as obsmod,
39 39 obsutil,
40 40 patch,
41 41 pathutil,
42 42 phases,
43 43 pycompat,
44 44 repoview,
45 45 revlog,
46 46 scmutil,
47 47 sparse,
48 48 subrepo,
49 49 util,
50 50 )
51 51
52 52 propertycache = util.propertycache
53 53
54 54 nonascii = re.compile(r'[^\x21-\x7f]').search
55 55
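The classes below are normally reached through a repository object rather than instantiated directly. A minimal orientation sketch of the usual entry points, assuming an existing local checkout and the Python 2 internal API of this era (the path '.' and the file name 'somefile' are placeholders):

```python
# Sketch only: hg's internal API is not stable; the names below match
# this file's era (hg ~4.5, Python 2).
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '.')  # an existing local repository
ctx = repo['tip']       # changectx: read-only, already-committed revision
wctx = repo[None]       # workingctx: the working directory, committable
fctx = ctx['somefile']  # filectx via basectx.__getitem__ -> self.filectx(key)
```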
56 56 class basectx(object):
57 57 """A basectx object represents the common logic for its children:
58 58 changectx: read-only context that is already present in the repo,
59 59 workingctx: a context that represents the working directory and can
60 60 be committed,
61 61 memctx: a context that represents changes in-memory and can also
62 62 be committed."""
63 63 def __new__(cls, repo, changeid='', *args, **kwargs):
64 64 if isinstance(changeid, basectx):
65 65 return changeid
66 66
67 67 o = super(basectx, cls).__new__(cls)
68 68
69 69 o._repo = repo
70 70 o._rev = nullrev
71 71 o._node = nullid
72 72
73 73 return o
74 74
75 75 def __bytes__(self):
76 76 return short(self.node())
77 77
78 78 __str__ = encoding.strmethod(__bytes__)
79 79
80 80 def __int__(self):
81 81 return self.rev()
82 82
83 83 def __repr__(self):
84 84 return r"<%s %s>" % (type(self).__name__, str(self))
85 85
86 86 def __eq__(self, other):
87 87 try:
88 88 return type(self) == type(other) and self._rev == other._rev
89 89 except AttributeError:
90 90 return False
91 91
92 92 def __ne__(self, other):
93 93 return not (self == other)
94 94
95 95 def __contains__(self, key):
96 96 return key in self._manifest
97 97
98 98 def __getitem__(self, key):
99 99 return self.filectx(key)
100 100
101 101 def __iter__(self):
102 102 return iter(self._manifest)
103 103
104 104 def _buildstatusmanifest(self, status):
105 105 """Builds a manifest that includes the given status results, if this is
106 106 a working copy context. For non-working copy contexts, it just returns
107 107 the normal manifest."""
108 108 return self.manifest()
109 109
110 110 def _matchstatus(self, other, match):
111 111 """This internal method provides a way for child objects to override the
112 112 match operator.
113 113 """
114 114 return match
115 115
116 116 def _buildstatus(self, other, s, match, listignored, listclean,
117 117 listunknown):
118 118 """build a status with respect to another context"""
119 119 # Load earliest manifest first for caching reasons. More specifically,
120 120 # if you have revisions 1000 and 1001, 1001 is probably stored as a
121 121 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
122 122 # 1000 and cache it so that when you read 1001, we just need to apply a
123 123 # delta to what's in the cache. So that's one full reconstruction + one
124 124 # delta application.
125 125 mf2 = None
126 126 if self.rev() is not None and self.rev() < other.rev():
127 127 mf2 = self._buildstatusmanifest(s)
128 128 mf1 = other._buildstatusmanifest(s)
129 129 if mf2 is None:
130 130 mf2 = self._buildstatusmanifest(s)
131 131
132 132 modified, added = [], []
133 133 removed = []
134 134 clean = []
135 135 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
136 136 deletedset = set(deleted)
137 137 d = mf1.diff(mf2, match=match, clean=listclean)
138 138 for fn, value in d.iteritems():
139 139 if fn in deletedset:
140 140 continue
141 141 if value is None:
142 142 clean.append(fn)
143 143 continue
144 144 (node1, flag1), (node2, flag2) = value
145 145 if node1 is None:
146 146 added.append(fn)
147 147 elif node2 is None:
148 148 removed.append(fn)
149 149 elif flag1 != flag2:
150 150 modified.append(fn)
151 151 elif node2 not in wdirnodes:
152 152 # When comparing files between two commits, we save time by
153 153 # not comparing the file contents when the nodeids differ.
154 154 # Note that this means we incorrectly report a reverted change
155 155 # to a file as a modification.
156 156 modified.append(fn)
157 157 elif self[fn].cmp(other[fn]):
158 158 modified.append(fn)
159 159 else:
160 160 clean.append(fn)
161 161
162 162 if removed:
163 163 # need to filter files if they are already reported as removed
164 164 unknown = [fn for fn in unknown if fn not in mf1 and
165 165 (not match or match(fn))]
166 166 ignored = [fn for fn in ignored if fn not in mf1 and
167 167 (not match or match(fn))]
168 168 # if they're deleted, don't report them as removed
169 169 removed = [fn for fn in removed if fn not in deletedset]
170 170
171 171 return scmutil.status(modified, added, removed, deleted, unknown,
172 172 ignored, clean)
173 173
174 174 @propertycache
175 175 def substate(self):
176 176 return subrepo.state(self, self._repo.ui)
177 177
178 178 def subrev(self, subpath):
179 179 return self.substate[subpath][1]
180 180
181 181 def rev(self):
182 182 return self._rev
183 183 def node(self):
184 184 return self._node
185 185 def hex(self):
186 186 return hex(self.node())
187 187 def manifest(self):
188 188 return self._manifest
189 189 def manifestctx(self):
190 190 return self._manifestctx
191 191 def repo(self):
192 192 return self._repo
193 193 def phasestr(self):
194 194 return phases.phasenames[self.phase()]
195 195 def mutable(self):
196 196 return self.phase() > phases.public
197 197
198 198 def getfileset(self, expr):
199 199 return fileset.getfileset(self, expr)
200 200
201 201 def obsolete(self):
202 202 """True if the changeset is obsolete"""
203 203 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
204 204
205 205 def extinct(self):
206 206 """True if the changeset is extinct"""
207 207 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
208 208
209 209 def unstable(self):
210 210 msg = ("'context.unstable' is deprecated, "
211 211 "use 'context.orphan'")
212 212 self._repo.ui.deprecwarn(msg, '4.4')
213 213 return self.orphan()
214 214
215 215 def orphan(self):
216 216 """True if the changeset is not obsolete, but its ancestors are"""
217 217 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
218 218
219 219 def bumped(self):
220 220 msg = ("'context.bumped' is deprecated, "
221 221 "use 'context.phasedivergent'")
222 222 self._repo.ui.deprecwarn(msg, '4.4')
223 223 return self.phasedivergent()
224 224
225 225 def phasedivergent(self):
226 226 """True if the changeset tries to be a successor of a public changeset
227 227
228 228 Only non-public and non-obsolete changesets may be bumped.
229 229 """
230 230 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
231 231
232 232 def divergent(self):
233 233 msg = ("'context.divergent' is deprecated, "
234 234 "use 'context.contentdivergent'")
235 235 self._repo.ui.deprecwarn(msg, '4.4')
236 236 return self.contentdivergent()
237 237
238 238 def contentdivergent(self):
239 239 """True if the changeset is a successor of a changeset with multiple possible successor sets
240 240
241 241 Only non-public and non-obsolete changesets may be divergent.
242 242 """
243 243 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
244 244
245 245 def troubled(self):
246 246 msg = ("'context.troubled' is deprecated, "
247 247 "use 'context.isunstable'")
248 248 self._repo.ui.deprecwarn(msg, '4.4')
249 249 return self.isunstable()
250 250
251 251 def isunstable(self):
252 252 """True if the changeset is either unstable, bumped or divergent"""
253 253 return self.orphan() or self.phasedivergent() or self.contentdivergent()
254 254
255 255 def troubles(self):
256 256 """Keep the old version around to avoid breaking extensions that
257 257 expect the old return values.
258 258 """
259 259 msg = ("'context.troubles' is deprecated, "
260 260 "use 'context.instabilities'")
261 261 self._repo.ui.deprecwarn(msg, '4.4')
262 262
263 263 troubles = []
264 264 if self.orphan():
265 265 troubles.append('orphan')
266 266 if self.phasedivergent():
267 267 troubles.append('bumped')
268 268 if self.contentdivergent():
269 269 troubles.append('divergent')
270 270 return troubles
271 271
272 272 def instabilities(self):
273 273 """return the list of instabilities affecting this changeset.
274 274
275 275 Instabilities are returned as strings. Possible values are:
276 276 - orphan,
277 277 - phase-divergent,
278 278 - content-divergent.
279 279 """
280 280 instabilities = []
281 281 if self.orphan():
282 282 instabilities.append('orphan')
283 283 if self.phasedivergent():
284 284 instabilities.append('phase-divergent')
285 285 if self.contentdivergent():
286 286 instabilities.append('content-divergent')
287 287 return instabilities
288 288
289 289 def parents(self):
290 290 """return contexts for each parent changeset"""
291 291 return self._parents
292 292
293 293 def p1(self):
294 294 return self._parents[0]
295 295
296 296 def p2(self):
297 297 parents = self._parents
298 298 if len(parents) == 2:
299 299 return parents[1]
300 300 return changectx(self._repo, nullrev)
301 301
302 302 def _fileinfo(self, path):
303 303 if r'_manifest' in self.__dict__:
304 304 try:
305 305 return self._manifest[path], self._manifest.flags(path)
306 306 except KeyError:
307 307 raise error.ManifestLookupError(self._node, path,
308 308 _('not found in manifest'))
309 309 if r'_manifestdelta' in self.__dict__ or path in self.files():
310 310 if path in self._manifestdelta:
311 311 return (self._manifestdelta[path],
312 312 self._manifestdelta.flags(path))
313 313 mfl = self._repo.manifestlog
314 314 try:
315 315 node, flag = mfl[self._changeset.manifest].find(path)
316 316 except KeyError:
317 317 raise error.ManifestLookupError(self._node, path,
318 318 _('not found in manifest'))
319 319
320 320 return node, flag
321 321
322 322 def filenode(self, path):
323 323 return self._fileinfo(path)[0]
324 324
325 325 def flags(self, path):
326 326 try:
327 327 return self._fileinfo(path)[1]
328 328 except error.LookupError:
329 329 return ''
330 330
331 331 def sub(self, path, allowcreate=True):
332 332 '''return a subrepo for the stored revision of path, never wdir()'''
333 333 return subrepo.subrepo(self, path, allowcreate=allowcreate)
334 334
335 335 def nullsub(self, path, pctx):
336 336 return subrepo.nullsubrepo(self, path, pctx)
337 337
338 338 def workingsub(self, path):
339 339 '''return a subrepo for the stored revision, or wdir if this is a wdir
340 340 context.
341 341 '''
342 342 return subrepo.subrepo(self, path, allowwdir=True)
343 343
344 344 def match(self, pats=None, include=None, exclude=None, default='glob',
345 345 listsubrepos=False, badfn=None):
346 346 r = self._repo
347 347 return matchmod.match(r.root, r.getcwd(), pats,
348 348 include, exclude, default,
349 349 auditor=r.nofsauditor, ctx=self,
350 350 listsubrepos=listsubrepos, badfn=badfn)
351 351
352 352 def diff(self, ctx2=None, match=None, **opts):
353 353 """Returns a diff generator for the given contexts and matcher"""
354 354 if ctx2 is None:
355 355 ctx2 = self.p1()
356 356 if ctx2 is not None:
357 357 ctx2 = self._repo[ctx2]
358 358 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
359 359 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
360 360
361 361 def dirs(self):
362 362 return self._manifest.dirs()
363 363
364 364 def hasdir(self, dir):
365 365 return self._manifest.hasdir(dir)
366 366
367 367 def status(self, other=None, match=None, listignored=False,
368 368 listclean=False, listunknown=False, listsubrepos=False):
369 369 """return status of files between two nodes or node and working
370 370 directory.
371 371
372 372 If other is None, compare this node with working directory.
373 373
374 374 returns (modified, added, removed, deleted, unknown, ignored, clean)
375 375 """
376 376
377 377 ctx1 = self
378 378 ctx2 = self._repo[other]
379 379
380 380 # This next code block is, admittedly, fragile logic that tests for
381 381 # reversing the contexts and wouldn't need to exist if it weren't for
382 382 # the fast (and common) code path of comparing the working directory
383 383 # with its first parent.
384 384 #
385 385 # What we're aiming for here is the ability to call:
386 386 #
387 387 # workingctx.status(parentctx)
388 388 #
389 389 # If we always built the manifest for each context and compared those,
390 390 # then we'd be done. But the special case of the above call means we
391 391 # just copy the manifest of the parent.
392 392 reversed = False
393 393 if (not isinstance(ctx1, changectx)
394 394 and isinstance(ctx2, changectx)):
395 395 reversed = True
396 396 ctx1, ctx2 = ctx2, ctx1
397 397
398 398 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
399 399 match = ctx2._matchstatus(ctx1, match)
400 400 r = scmutil.status([], [], [], [], [], [], [])
401 401 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
402 402 listunknown)
403 403
404 404 if reversed:
405 405 # Reverse added and removed. Clear deleted, unknown and ignored as
406 406 # these make no sense to reverse.
407 407 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
408 408 r.clean)
409 409
410 410 if listsubrepos:
411 411 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
412 412 try:
413 413 rev2 = ctx2.subrev(subpath)
414 414 except KeyError:
415 415 # A subrepo that existed in node1 was deleted between
416 416 # node1 and node2 (inclusive). Thus, ctx2's substate
417 417 # won't contain that subpath. The best we can do is ignore it.
418 418 rev2 = None
419 419 submatch = matchmod.subdirmatcher(subpath, match)
420 420 s = sub.status(rev2, match=submatch, ignored=listignored,
421 421 clean=listclean, unknown=listunknown,
422 422 listsubrepos=True)
423 423 for rfiles, sfiles in zip(r, s):
424 424 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
425 425
426 426 for l in r:
427 427 l.sort()
428 428
429 429 return r
430 430
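basectx.status() above is the engine behind `hg status`-style comparisons. A hedged sketch of comparing a changeset with its first parent, reusing the `repo` object from the earlier snippet:

```python
# Sketch, assuming `repo` from the earlier snippet. Per _buildstatus(),
# `ctx1.status(ctx2)` reports changes going from ctx1 to ctx2.
tip = repo['tip']
st = tip.p1().status(tip)   # a scmutil.status tuple
print(st.modified)          # files changed by tip relative to its parent
print(st.added, st.removed)
```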
431 431 def _filterederror(repo, changeid):
432 432 """build an exception to be raised about a filtered changeid
433 433
434 434 This is extracted in a function to help extensions (eg: evolve) to
435 435 experiment with various message variants."""
436 436 if repo.filtername.startswith('visible'):
437 437
438 438 # Check if the changeset is obsolete
439 439 unfilteredrepo = repo.unfiltered()
440 440 ctx = unfilteredrepo[changeid]
441 441
442 442 # If the changeset is obsolete, enrich the message with the reason
443 443 # that made this changeset not visible
444 444 if ctx.obsolete():
445 - reason = obsutil._getfilteredreason(unfilteredrepo, ctx)
446 - msg = _("hidden revision '%s' %s") % (changeid, reason)
445 + msg = obsutil._getfilteredreason(unfilteredrepo, changeid, ctx)
447 446 else:
448 447 msg = _("hidden revision '%s'") % changeid
449 448
450 449 hint = _('use --hidden to access hidden revisions')
451 450
452 451 return error.FilteredRepoLookupError(msg, hint=hint)
453 452 msg = _("filtered revision '%s' (not in '%s' subset)")
454 453 msg %= (changeid, repo.filtername)
455 454 return error.FilteredRepoLookupError(msg)
456 455
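The hunk above is this commit's actual change: `obsutil._getfilteredreason()` now receives the changeid and returns the complete "hidden revision ..." sentence, so each variant can be translated as one whole message instead of being spliced together here. A sketch of the error path it feeds, with a hypothetical short hash:

```python
# Sketch, assuming `repo` from the earlier snippet; 'd4a9a2c2b9f1' stands
# in for the short hash of a hidden (e.g. obsolete) changeset.
from mercurial import error

try:
    repo['d4a9a2c2b9f1']
except error.FilteredRepoLookupError as err:
    print(err)  # full message built by obsutil._getfilteredreason();
                # hint: "use --hidden to access hidden revisions"
```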
457 456 class changectx(basectx):
458 457 """A changecontext object makes access to data related to a particular
459 458 changeset convenient. It represents a read-only context already present in
460 459 the repo."""
461 460 def __init__(self, repo, changeid=''):
462 461 """changeid is a revision number, node, or tag"""
463 462
464 463 # since basectx.__new__ already took care of copying the object, we
465 464 # don't need to do anything in __init__, so we just exit here
466 465 if isinstance(changeid, basectx):
467 466 return
468 467
469 468 if changeid == '':
470 469 changeid = '.'
471 470 self._repo = repo
472 471
473 472 try:
474 473 if isinstance(changeid, int):
475 474 self._node = repo.changelog.node(changeid)
476 475 self._rev = changeid
477 476 return
478 477 if not pycompat.ispy3 and isinstance(changeid, long):
479 478 changeid = str(changeid)
480 479 if changeid == 'null':
481 480 self._node = nullid
482 481 self._rev = nullrev
483 482 return
484 483 if changeid == 'tip':
485 484 self._node = repo.changelog.tip()
486 485 self._rev = repo.changelog.rev(self._node)
487 486 return
488 487 if (changeid == '.'
489 488 or repo.local() and changeid == repo.dirstate.p1()):
490 489 # this is a hack to delay/avoid loading obsmarkers
491 490 # when we know that '.' won't be hidden
492 491 self._node = repo.dirstate.p1()
493 492 self._rev = repo.unfiltered().changelog.rev(self._node)
494 493 return
495 494 if len(changeid) == 20:
496 495 try:
497 496 self._node = changeid
498 497 self._rev = repo.changelog.rev(changeid)
499 498 return
500 499 except error.FilteredRepoLookupError:
501 500 raise
502 501 except LookupError:
503 502 pass
504 503
505 504 try:
506 505 r = int(changeid)
507 506 if '%d' % r != changeid:
508 507 raise ValueError
509 508 l = len(repo.changelog)
510 509 if r < 0:
511 510 r += l
512 511 if r < 0 or r >= l and r != wdirrev:
513 512 raise ValueError
514 513 self._rev = r
515 514 self._node = repo.changelog.node(r)
516 515 return
517 516 except error.FilteredIndexError:
518 517 raise
519 518 except (ValueError, OverflowError, IndexError):
520 519 pass
521 520
522 521 if len(changeid) == 40:
523 522 try:
524 523 self._node = bin(changeid)
525 524 self._rev = repo.changelog.rev(self._node)
526 525 return
527 526 except error.FilteredLookupError:
528 527 raise
529 528 except (TypeError, LookupError):
530 529 pass
531 530
532 531 # lookup bookmarks through the name interface
533 532 try:
534 533 self._node = repo.names.singlenode(repo, changeid)
535 534 self._rev = repo.changelog.rev(self._node)
536 535 return
537 536 except KeyError:
538 537 pass
539 538 except error.FilteredRepoLookupError:
540 539 raise
541 540 except error.RepoLookupError:
542 541 pass
543 542
544 543 self._node = repo.unfiltered().changelog._partialmatch(changeid)
545 544 if self._node is not None:
546 545 self._rev = repo.changelog.rev(self._node)
547 546 return
548 547
549 548 # lookup failed
550 549 # check if it might have come from a damaged dirstate
551 550 #
552 551 # XXX we could avoid the unfiltered if we had a recognizable
553 552 # exception for filtered changeset access
554 553 if (repo.local()
555 554 and changeid in repo.unfiltered().dirstate.parents()):
556 555 msg = _("working directory has unknown parent '%s'!")
557 556 raise error.Abort(msg % short(changeid))
558 557 try:
559 558 if len(changeid) == 20 and nonascii(changeid):
560 559 changeid = hex(changeid)
561 560 except TypeError:
562 561 pass
563 562 except (error.FilteredIndexError, error.FilteredLookupError,
564 563 error.FilteredRepoLookupError):
565 564 raise _filterederror(repo, changeid)
566 565 except IndexError:
567 566 pass
568 567 raise error.RepoLookupError(
569 568 _("unknown revision '%s'") % changeid)
570 569
571 570 def __hash__(self):
572 571 try:
573 572 return hash(self._rev)
574 573 except AttributeError:
575 574 return id(self)
576 575
577 576 def __nonzero__(self):
578 577 return self._rev != nullrev
579 578
580 579 __bool__ = __nonzero__
581 580
582 581 @propertycache
583 582 def _changeset(self):
584 583 return self._repo.changelog.changelogrevision(self.rev())
585 584
586 585 @propertycache
587 586 def _manifest(self):
588 587 return self._manifestctx.read()
589 588
590 589 @property
591 590 def _manifestctx(self):
592 591 return self._repo.manifestlog[self._changeset.manifest]
593 592
594 593 @propertycache
595 594 def _manifestdelta(self):
596 595 return self._manifestctx.readdelta()
597 596
598 597 @propertycache
599 598 def _parents(self):
600 599 repo = self._repo
601 600 p1, p2 = repo.changelog.parentrevs(self._rev)
602 601 if p2 == nullrev:
603 602 return [changectx(repo, p1)]
604 603 return [changectx(repo, p1), changectx(repo, p2)]
605 604
606 605 def changeset(self):
607 606 c = self._changeset
608 607 return (
609 608 c.manifest,
610 609 c.user,
611 610 c.date,
612 611 c.files,
613 612 c.description,
614 613 c.extra,
615 614 )
616 615 def manifestnode(self):
617 616 return self._changeset.manifest
618 617
619 618 def user(self):
620 619 return self._changeset.user
621 620 def date(self):
622 621 return self._changeset.date
623 622 def files(self):
624 623 return self._changeset.files
625 624 def description(self):
626 625 return self._changeset.description
627 626 def branch(self):
628 627 return encoding.tolocal(self._changeset.extra.get("branch"))
629 628 def closesbranch(self):
630 629 return 'close' in self._changeset.extra
631 630 def extra(self):
632 631 """Return a dict of extra information."""
633 632 return self._changeset.extra
634 633 def tags(self):
635 634 """Return a list of byte tag names"""
636 635 return self._repo.nodetags(self._node)
637 636 def bookmarks(self):
638 637 """Return a list of byte bookmark names."""
639 638 return self._repo.nodebookmarks(self._node)
640 639 def phase(self):
641 640 return self._repo._phasecache.phase(self._repo, self._rev)
642 641 def hidden(self):
643 642 return self._rev in repoview.filterrevs(self._repo, 'visible')
644 643
645 644 def isinmemory(self):
646 645 return False
647 646
648 647 def children(self):
649 648 """return list of changectx contexts for each child changeset.
650 649
651 650 This returns only the immediate child changesets. Use descendants() to
652 651 recursively walk children.
653 652 """
654 653 c = self._repo.changelog.children(self._node)
655 654 return [changectx(self._repo, x) for x in c]
656 655
657 656 def ancestors(self):
658 657 for a in self._repo.changelog.ancestors([self._rev]):
659 658 yield changectx(self._repo, a)
660 659
661 660 def descendants(self):
662 661 """Recursively yield all children of the changeset.
663 662
664 663 For just the immediate children, use children()
665 664 """
666 665 for d in self._repo.changelog.descendants([self._rev]):
667 666 yield changectx(self._repo, d)
668 667
669 668 def filectx(self, path, fileid=None, filelog=None):
670 669 """get a file context from this changeset"""
671 670 if fileid is None:
672 671 fileid = self.filenode(path)
673 672 return filectx(self._repo, path, fileid=fileid,
674 673 changectx=self, filelog=filelog)
675 674
676 675 def ancestor(self, c2, warn=False):
677 676 """return the "best" ancestor context of self and c2
678 677
679 678 If there are multiple candidates, it will show a message and check
680 679 merge.preferancestor configuration before falling back to the
681 680 revlog ancestor."""
682 681 # deal with workingctxs
683 682 n2 = c2._node
684 683 if n2 is None:
685 684 n2 = c2._parents[0]._node
686 685 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
687 686 if not cahs:
688 687 anc = nullid
689 688 elif len(cahs) == 1:
690 689 anc = cahs[0]
691 690 else:
692 691 # experimental config: merge.preferancestor
693 692 for r in self._repo.ui.configlist('merge', 'preferancestor'):
694 693 try:
695 694 ctx = changectx(self._repo, r)
696 695 except error.RepoLookupError:
697 696 continue
698 697 anc = ctx.node()
699 698 if anc in cahs:
700 699 break
701 700 else:
702 701 anc = self._repo.changelog.ancestor(self._node, n2)
703 702 if warn:
704 703 self._repo.ui.status(
705 704 (_("note: using %s as ancestor of %s and %s\n") %
706 705 (short(anc), short(self._node), short(n2))) +
707 706 ''.join(_(" alternatively, use --config "
708 707 "merge.preferancestor=%s\n") %
709 708 short(n) for n in sorted(cahs) if n != anc))
710 709 return changectx(self._repo, anc)
711 710
712 711 def descendant(self, other):
713 712 """True if other is descendant of this changeset"""
714 713 return self._repo.changelog.descendant(self._rev, other._rev)
715 714
716 715 def walk(self, match):
717 716 '''Generates matching file names.'''
718 717
719 718 # Wrap the match.bad method so its messages include the nodeid
720 719 def bad(fn, msg):
721 720 # The manifest doesn't know about subrepos, so don't complain about
722 721 # paths into valid subrepos.
723 722 if any(fn == s or fn.startswith(s + '/')
724 723 for s in self.substate):
725 724 return
726 725 match.bad(fn, _('no such file in rev %s') % self)
727 726
728 727 m = matchmod.badmatch(match, bad)
729 728 return self._manifest.walk(m)
730 729
731 730 def matches(self, match):
732 731 return self.walk(match)
733 732
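A short sketch of the read-only accessors changectx exposes for changelog data, again assuming the `repo` object from the first snippet:

```python
# Sketch, assuming `repo` from the first snippet.
ctx = repo['tip']
print(ctx.rev(), ctx.hex()[:12])   # revision number and short hash
print(ctx.user())                  # committer
print(ctx.description())           # commit message
print(ctx.files())                 # files touched by the changeset
print(ctx.branch(), ctx.phasestr())
for p in ctx.parents():
    print('parent: %d' % p.rev())
```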
734 733 class basefilectx(object):
735 734 """A filecontext object represents the common logic for its children:
736 735 filectx: read-only access to a filerevision that is already present
737 736 in the repo,
738 737 workingfilectx: a filecontext that represents files from the working
739 738 directory,
740 739 memfilectx: a filecontext that represents files in-memory,
741 740 overlayfilectx: duplicate another filecontext with some fields overridden.
742 741 """
743 742 @propertycache
744 743 def _filelog(self):
745 744 return self._repo.file(self._path)
746 745
747 746 @propertycache
748 747 def _changeid(self):
749 748 if r'_changeid' in self.__dict__:
750 749 return self._changeid
751 750 elif r'_changectx' in self.__dict__:
752 751 return self._changectx.rev()
753 752 elif r'_descendantrev' in self.__dict__:
754 753 # this file context was created from a revision with a known
755 754 # descendant, we can (lazily) correct for linkrev aliases
756 755 return self._adjustlinkrev(self._descendantrev)
757 756 else:
758 757 return self._filelog.linkrev(self._filerev)
759 758
760 759 @propertycache
761 760 def _filenode(self):
762 761 if r'_fileid' in self.__dict__:
763 762 return self._filelog.lookup(self._fileid)
764 763 else:
765 764 return self._changectx.filenode(self._path)
766 765
767 766 @propertycache
768 767 def _filerev(self):
769 768 return self._filelog.rev(self._filenode)
770 769
771 770 @propertycache
772 771 def _repopath(self):
773 772 return self._path
774 773
775 774 def __nonzero__(self):
776 775 try:
777 776 self._filenode
778 777 return True
779 778 except error.LookupError:
780 779 # file is missing
781 780 return False
782 781
783 782 __bool__ = __nonzero__
784 783
785 784 def __bytes__(self):
786 785 try:
787 786 return "%s@%s" % (self.path(), self._changectx)
788 787 except error.LookupError:
789 788 return "%s@???" % self.path()
790 789
791 790 __str__ = encoding.strmethod(__bytes__)
792 791
793 792 def __repr__(self):
794 793 return "<%s %s>" % (type(self).__name__, str(self))
795 794
796 795 def __hash__(self):
797 796 try:
798 797 return hash((self._path, self._filenode))
799 798 except AttributeError:
800 799 return id(self)
801 800
802 801 def __eq__(self, other):
803 802 try:
804 803 return (type(self) == type(other) and self._path == other._path
805 804 and self._filenode == other._filenode)
806 805 except AttributeError:
807 806 return False
808 807
809 808 def __ne__(self, other):
810 809 return not (self == other)
811 810
812 811 def filerev(self):
813 812 return self._filerev
814 813 def filenode(self):
815 814 return self._filenode
816 815 @propertycache
817 816 def _flags(self):
818 817 return self._changectx.flags(self._path)
819 818 def flags(self):
820 819 return self._flags
821 820 def filelog(self):
822 821 return self._filelog
823 822 def rev(self):
824 823 return self._changeid
825 824 def linkrev(self):
826 825 return self._filelog.linkrev(self._filerev)
827 826 def node(self):
828 827 return self._changectx.node()
829 828 def hex(self):
830 829 return self._changectx.hex()
831 830 def user(self):
832 831 return self._changectx.user()
833 832 def date(self):
834 833 return self._changectx.date()
835 834 def files(self):
836 835 return self._changectx.files()
837 836 def description(self):
838 837 return self._changectx.description()
839 838 def branch(self):
840 839 return self._changectx.branch()
841 840 def extra(self):
842 841 return self._changectx.extra()
843 842 def phase(self):
844 843 return self._changectx.phase()
845 844 def phasestr(self):
846 845 return self._changectx.phasestr()
847 846 def obsolete(self):
848 847 return self._changectx.obsolete()
849 848 def instabilities(self):
850 849 return self._changectx.instabilities()
851 850 def manifest(self):
852 851 return self._changectx.manifest()
853 852 def changectx(self):
854 853 return self._changectx
855 854 def renamed(self):
856 855 return self._copied
857 856 def repo(self):
858 857 return self._repo
859 858 def size(self):
860 859 return len(self.data())
861 860
862 861 def path(self):
863 862 return self._path
864 863
865 864 def isbinary(self):
866 865 try:
867 866 return util.binary(self.data())
868 867 except IOError:
869 868 return False
870 869 def isexec(self):
871 870 return 'x' in self.flags()
872 871 def islink(self):
873 872 return 'l' in self.flags()
874 873
875 874 def isabsent(self):
876 875 """whether this filectx represents a file not in self._changectx
877 876
878 877 This is mainly for merge code to detect change/delete conflicts. This is
879 878 expected to be True for all subclasses of basectx."""
880 879 return False
881 880
882 881 _customcmp = False
883 882 def cmp(self, fctx):
884 883 """compare with other file context
885 884
886 885 returns True if different than fctx.
887 886 """
888 887 if fctx._customcmp:
889 888 return fctx.cmp(self)
890 889
891 890 if (fctx._filenode is None
892 891 and (self._repo._encodefilterpats
893 892 # if file data starts with '\1\n', empty metadata block is
894 893 # prepended, which adds 4 bytes to filelog.size().
895 894 or self.size() - 4 == fctx.size())
896 895 or self.size() == fctx.size()):
897 896 return self._filelog.cmp(self._filenode, fctx.data())
898 897
899 898 return True
900 899
901 900 def _adjustlinkrev(self, srcrev, inclusive=False):
902 901 """return the first ancestor of <srcrev> introducing <fnode>
903 902
904 903 If the linkrev of the file revision does not point to an ancestor of
905 904 srcrev, we'll walk down the ancestors until we find one introducing
906 905 this file revision.
907 906
908 907 :srcrev: the changeset revision we search ancestors from
909 908 :inclusive: if true, the src revision will also be checked
910 909 """
911 910 repo = self._repo
912 911 cl = repo.unfiltered().changelog
913 912 mfl = repo.manifestlog
914 913 # fetch the linkrev
915 914 lkr = self.linkrev()
916 915 # hack to reuse ancestor computation when searching for renames
917 916 memberanc = getattr(self, '_ancestrycontext', None)
918 917 iteranc = None
919 918 if srcrev is None:
920 919 # wctx case, used by workingfilectx during mergecopy
921 920 revs = [p.rev() for p in self._repo[None].parents()]
922 921 inclusive = True # we skipped the real (revless) source
923 922 else:
924 923 revs = [srcrev]
925 924 if memberanc is None:
926 925 memberanc = iteranc = cl.ancestors(revs, lkr,
927 926 inclusive=inclusive)
928 927 # check if this linkrev is an ancestor of srcrev
929 928 if lkr not in memberanc:
930 929 if iteranc is None:
931 930 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
932 931 fnode = self._filenode
933 932 path = self._path
934 933 for a in iteranc:
935 934 ac = cl.read(a) # get changeset data (we avoid object creation)
936 935 if path in ac[3]: # checking the 'files' field.
937 936 # The file has been touched, check if the content is
938 937 # similar to the one we search for.
939 938 if fnode == mfl[ac[0]].readfast().get(path):
940 939 return a
941 940 # In theory, we should never get out of that loop without a result.
942 941 # But if a manifest uses a buggy file revision (not a child of the
943 942 # one it replaces), we could. Such a buggy situation will likely
944 943 # result in a crash somewhere else at some point.
945 944 return lkr
946 945
947 946 def introrev(self):
948 947 """return the rev of the changeset which introduced this file revision
949 948
950 949 This method is different from linkrev because it takes into account the
951 950 changeset the filectx was created from. It ensures the returned
952 951 revision is one of its ancestors. This prevents bugs from
953 952 'linkrev-shadowing' when a file revision is used by multiple
954 953 changesets.
955 954 """
956 955 lkr = self.linkrev()
957 956 attrs = vars(self)
958 957 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
959 958 if noctx or self.rev() == lkr:
960 959 return self.linkrev()
961 960 return self._adjustlinkrev(self.rev(), inclusive=True)
962 961
963 962 def introfilectx(self):
964 963 """Return filectx having identical contents, but pointing to the
965 964 changeset revision where this filectx was introduced"""
966 965 introrev = self.introrev()
967 966 if self.rev() == introrev:
968 967 return self
969 968 return self.filectx(self.filenode(), changeid=introrev)
970 969
971 970 def _parentfilectx(self, path, fileid, filelog):
972 971 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
973 972 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
974 973 if '_changeid' in vars(self) or '_changectx' in vars(self):
975 974 # If self is associated with a changeset (probably explicitly
976 975 # fed), ensure the created filectx is associated with a
977 976 # changeset that is an ancestor of self.changectx.
978 977 # This lets us later use _adjustlinkrev to get a correct link.
979 978 fctx._descendantrev = self.rev()
980 979 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
981 980 elif '_descendantrev' in vars(self):
982 981 # Otherwise propagate _descendantrev if we have one associated.
983 982 fctx._descendantrev = self._descendantrev
984 983 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
985 984 return fctx
986 985
987 986 def parents(self):
988 987 _path = self._path
989 988 fl = self._filelog
990 989 parents = self._filelog.parents(self._filenode)
991 990 pl = [(_path, node, fl) for node in parents if node != nullid]
992 991
993 992 r = fl.renamed(self._filenode)
994 993 if r:
995 994 # - In the simple rename case, both parents are nullid and pl is empty.
996 995 # - In case of merge, only one of the parents is nullid and should
997 996 # be replaced with the rename information. This parent is -always-
998 997 # the first one.
999 998 #
1000 999 # As nullid parents have always been filtered out by the previous
1001 1000 # list comprehension, inserting at index 0 always results in
1002 1001 # replacing the first nullid parent with the rename information.
1003 1002 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1004 1003
1005 1004 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1006 1005
1007 1006 def p1(self):
1008 1007 return self.parents()[0]
1009 1008
1010 1009 def p2(self):
1011 1010 p = self.parents()
1012 1011 if len(p) == 2:
1013 1012 return p[1]
1014 1013 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1015 1014
1016 1015 def annotate(self, follow=False, linenumber=False, skiprevs=None,
1017 1016 diffopts=None):
1018 1017 '''returns a list of tuples of ((ctx, number), line) for each line
1019 1018 in the file, where ctx is the filectx of the node where
1020 1019 that line was last changed; if linenumber parameter is true, number is
1021 1020 the line number at the first appearance in the managed file, otherwise,
1022 1021 number has a fixed value of False.
1023 1022 '''
1024 1023
1025 1024 def lines(text):
1026 1025 if text.endswith("\n"):
1027 1026 return text.count("\n")
1028 1027 return text.count("\n") + int(bool(text))
1029 1028
1030 1029 if linenumber:
1031 1030 def decorate(text, rev):
1032 1031 return ([annotateline(fctx=rev, lineno=i)
1033 1032 for i in xrange(1, lines(text) + 1)], text)
1034 1033 else:
1035 1034 def decorate(text, rev):
1036 1035 return ([annotateline(fctx=rev)] * lines(text), text)
1037 1036
1038 1037 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1039 1038
1040 1039 def parents(f):
1041 1040 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1042 1041 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1043 1042 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1044 1043 # isn't an ancestor of the srcrev.
1045 1044 f._changeid
1046 1045 pl = f.parents()
1047 1046
1048 1047 # Don't return renamed parents if we aren't following.
1049 1048 if not follow:
1050 1049 pl = [p for p in pl if p.path() == f.path()]
1051 1050
1052 1051 # renamed filectx won't have a filelog yet, so set it
1053 1052 # from the cache to save time
1054 1053 for p in pl:
1055 1054 if not '_filelog' in p.__dict__:
1056 1055 p._filelog = getlog(p.path())
1057 1056
1058 1057 return pl
1059 1058
1060 1059 # use linkrev to find the first changeset where self appeared
1061 1060 base = self.introfilectx()
1062 1061 if getattr(base, '_ancestrycontext', None) is None:
1063 1062 cl = self._repo.changelog
1064 1063 if base.rev() is None:
1065 1064 # wctx is not inclusive, but works because _ancestrycontext
1066 1065 # is used to test filelog revisions
1067 1066 ac = cl.ancestors([p.rev() for p in base.parents()],
1068 1067 inclusive=True)
1069 1068 else:
1070 1069 ac = cl.ancestors([base.rev()], inclusive=True)
1071 1070 base._ancestrycontext = ac
1072 1071
1073 1072 # This algorithm would prefer to be recursive, but Python is a
1074 1073 # bit recursion-hostile. Instead we do an iterative
1075 1074 # depth-first search.
1076 1075
1077 1076 # 1st DFS pre-calculates pcache and needed
1078 1077 visit = [base]
1079 1078 pcache = {}
1080 1079 needed = {base: 1}
1081 1080 while visit:
1082 1081 f = visit.pop()
1083 1082 if f in pcache:
1084 1083 continue
1085 1084 pl = parents(f)
1086 1085 pcache[f] = pl
1087 1086 for p in pl:
1088 1087 needed[p] = needed.get(p, 0) + 1
1089 1088 if p not in pcache:
1090 1089 visit.append(p)
1091 1090
1092 1091 # 2nd DFS does the actual annotate
1093 1092 visit[:] = [base]
1094 1093 hist = {}
1095 1094 while visit:
1096 1095 f = visit[-1]
1097 1096 if f in hist:
1098 1097 visit.pop()
1099 1098 continue
1100 1099
1101 1100 ready = True
1102 1101 pl = pcache[f]
1103 1102 for p in pl:
1104 1103 if p not in hist:
1105 1104 ready = False
1106 1105 visit.append(p)
1107 1106 if ready:
1108 1107 visit.pop()
1109 1108 curr = decorate(f.data(), f)
1110 1109 skipchild = False
1111 1110 if skiprevs is not None:
1112 1111 skipchild = f._changeid in skiprevs
1113 1112 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1114 1113 diffopts)
1115 1114 for p in pl:
1116 1115 if needed[p] == 1:
1117 1116 del hist[p]
1118 1117 del needed[p]
1119 1118 else:
1120 1119 needed[p] -= 1
1121 1120
1122 1121 hist[f] = curr
1123 1122 del pcache[f]
1124 1123
1125 1124 return pycompat.ziplist(hist[base][0], hist[base][1].splitlines(True))
1126 1125
1127 1126 def ancestors(self, followfirst=False):
1128 1127 visit = {}
1129 1128 c = self
1130 1129 if followfirst:
1131 1130 cut = 1
1132 1131 else:
1133 1132 cut = None
1134 1133
1135 1134 while True:
1136 1135 for parent in c.parents()[:cut]:
1137 1136 visit[(parent.linkrev(), parent.filenode())] = parent
1138 1137 if not visit:
1139 1138 break
1140 1139 c = visit.pop(max(visit))
1141 1140 yield c
1142 1141
1143 1142 def decodeddata(self):
1144 1143 """Returns `data()` after running repository decoding filters.
1145 1144
1146 1145 This is often equivalent to how the data would be expressed on disk.
1147 1146 """
1148 1147 return self._repo.wwritedata(self.path(), self.data())
1149 1148
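basefilectx.annotate() above returns (annotateline, line) pairs (the docstring's ((ctx, number), line) shape appears to predate the annotateline refactor). A hedged usage sketch:

```python
# Sketch, assuming `fctx` is a filectx for a tracked file (see the first
# snippet). Each entry pairs an annotateline with the line's text.
for ann, line in fctx.annotate(follow=True, linenumber=True):
    print('%s:%s: %s' % (ann.fctx, ann.lineno, line.rstrip('\n')))
```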
1150 1149 @attr.s(slots=True, frozen=True)
1151 1150 class annotateline(object):
1152 1151 fctx = attr.ib()
1153 1152 lineno = attr.ib(default=False)
1154 1153 # Whether this annotation was the result of a skip-annotate.
1155 1154 skip = attr.ib(default=False)
1156 1155
1157 1156 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1158 1157 r'''
1159 1158 Given parent and child fctxes and annotate data for parents, for all lines
1160 1159 in either parent that match the child, annotate the child with the parent's
1161 1160 data.
1162 1161
1163 1162 Additionally, if `skipchild` is True, replace all other lines with parent
1164 1163 annotate data as well such that child is never blamed for any lines.
1165 1164
1166 1165 See test-annotate.py for unit tests.
1167 1166 '''
1168 1167 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1169 1168 for parent in parents]
1170 1169
1171 1170 if skipchild:
1172 1171 # Need to iterate over the blocks twice -- make it a list
1173 1172 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1174 1173 # Mercurial currently prefers p2 over p1 for annotate.
1175 1174 # TODO: change this?
1176 1175 for parent, blocks in pblocks:
1177 1176 for (a1, a2, b1, b2), t in blocks:
1178 1177 # Changed blocks ('!') or blocks made only of blank lines ('~')
1179 1178 # belong to the child.
1180 1179 if t == '=':
1181 1180 child[0][b1:b2] = parent[0][a1:a2]
1182 1181
1183 1182 if skipchild:
1184 1183 # Now try to match up anything that couldn't be matched.
1185 1184 # Reversing pblocks maintains the bias towards p2, matching the
1186 1185 # behavior above.
1187 1186 pblocks.reverse()
1188 1187
1189 1188 # The heuristics are:
1190 1189 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1191 1190 # This could potentially be smarter but works well enough.
1192 1191 # * For a non-matching section, do a best-effort fit. Match lines in
1193 1192 # diff hunks 1:1, dropping lines as necessary.
1194 1193 # * Repeat the last line as a last resort.
1195 1194
1196 1195 # First, replace as much as possible without repeating the last line.
1197 1196 remaining = [(parent, []) for parent, _blocks in pblocks]
1198 1197 for idx, (parent, blocks) in enumerate(pblocks):
1199 1198 for (a1, a2, b1, b2), _t in blocks:
1200 1199 if a2 - a1 >= b2 - b1:
1201 1200 for bk in xrange(b1, b2):
1202 1201 if child[0][bk].fctx == childfctx:
1203 1202 ak = min(a1 + (bk - b1), a2 - 1)
1204 1203 child[0][bk] = attr.evolve(parent[0][ak], skip=True)
1205 1204 else:
1206 1205 remaining[idx][1].append((a1, a2, b1, b2))
1207 1206
1208 1207 # Then, look at anything left, which might involve repeating the last
1209 1208 # line.
1210 1209 for parent, blocks in remaining:
1211 1210 for a1, a2, b1, b2 in blocks:
1212 1211 for bk in xrange(b1, b2):
1213 1212 if child[0][bk].fctx == childfctx:
1214 1213 ak = min(a1 + (bk - b1), a2 - 1)
1215 1214 child[0][bk] = attr.evolve(parent[0][ak], skip=True)
1216 1215 return child
1217 1216
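A tiny sketch of what _annotatepair() does, in the spirit of the unit tests in test-annotate.py that the docstring points to: lines unchanged between parent and child keep the parent's annotation, while changed lines stay blamed on the child. The string fctx values are stand-ins:

```python
# Stand-in fctx values ('old', 'new') instead of real filectx objects.
from mercurial.context import annotateline, _annotatepair

parent = ([annotateline('old', 1), annotateline('old', 2)], 'a\nb\n')
child = ([annotateline('new', 1), annotateline('new', 2)], 'a\nc\n')
result = _annotatepair([parent], 'new', child, False, None)
# result[0][0] now carries the parent's annotation for the unchanged 'a';
# result[0][1] keeps fctx 'new' for the changed line 'c'.
```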
1218 1217 class filectx(basefilectx):
1219 1218 """A filecontext object makes access to data related to a particular
1220 1219 filerevision convenient."""
1221 1220 def __init__(self, repo, path, changeid=None, fileid=None,
1222 1221 filelog=None, changectx=None):
1223 1222 """changeid can be a changeset revision, node, or tag.
1224 1223 fileid can be a file revision or node."""
1225 1224 self._repo = repo
1226 1225 self._path = path
1227 1226
1228 1227 assert (changeid is not None
1229 1228 or fileid is not None
1230 1229 or changectx is not None), \
1231 1230 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1232 1231 % (changeid, fileid, changectx))
1233 1232
1234 1233 if filelog is not None:
1235 1234 self._filelog = filelog
1236 1235
1237 1236 if changeid is not None:
1238 1237 self._changeid = changeid
1239 1238 if changectx is not None:
1240 1239 self._changectx = changectx
1241 1240 if fileid is not None:
1242 1241 self._fileid = fileid
1243 1242
1244 1243 @propertycache
1245 1244 def _changectx(self):
1246 1245 try:
1247 1246 return changectx(self._repo, self._changeid)
1248 1247 except error.FilteredRepoLookupError:
1249 1248 # Linkrev may point to any revision in the repository. When the
1250 1249 # repository is filtered this may lead to `filectx` trying to build
1251 1250 # `changectx` for a filtered revision. In such a case we fall back
1252 1251 # to creating `changectx` on the unfiltered version of the repository.
1253 1252 # This fallback should not be an issue because `changectx` objects
1254 1253 # built from `filectx` are not used in complex operations that care
1255 1254 # about filtering.
1256 1255 #
1257 1256 # This fallback is a cheap and dirty fix that prevents several
1258 1257 # crashes. It does not ensure the behavior is correct. However the
1259 1258 # behavior was not correct before filtering either, and "incorrect
1260 1259 # behavior" is seen as better than "crash".
1261 1260 #
1262 1261 # Linkrevs have several serious problems with filtering that are
1263 1262 # complicated to solve. Proper handling of the issue here should be
1264 1263 # reconsidered once solutions to the linkrev issue are on the table.
1265 1264 return changectx(self._repo.unfiltered(), self._changeid)
1266 1265
1267 1266 def filectx(self, fileid, changeid=None):
1268 1267 '''opens an arbitrary revision of the file without
1269 1268 opening a new filelog'''
1270 1269 return filectx(self._repo, self._path, fileid=fileid,
1271 1270 filelog=self._filelog, changeid=changeid)
1272 1271
1273 1272 def rawdata(self):
1274 1273 return self._filelog.revision(self._filenode, raw=True)
1275 1274
1276 1275 def rawflags(self):
1277 1276 """low-level revlog flags"""
1278 1277 return self._filelog.flags(self._filerev)
1279 1278
1280 1279 def data(self):
1281 1280 try:
1282 1281 return self._filelog.read(self._filenode)
1283 1282 except error.CensoredNodeError:
1284 1283 if self._repo.ui.config("censor", "policy") == "ignore":
1285 1284 return ""
1286 1285 raise error.Abort(_("censored node: %s") % short(self._filenode),
1287 1286 hint=_("set censor.policy to ignore errors"))
1288 1287
1289 1288 def size(self):
1290 1289 return self._filelog.size(self._filerev)
1291 1290
1292 1291 @propertycache
1293 1292 def _copied(self):
1294 1293 """check if file was actually renamed in this changeset revision
1295 1294
1296 1295 If a rename is logged in the file revision, we report the copy for the
1297 1296 changeset only if the file revision's linkrev points back to the changeset
1298 1297 in question or both changeset parents contain different file revisions.
1299 1298 """
1300 1299
1301 1300 renamed = self._filelog.renamed(self._filenode)
1302 1301 if not renamed:
1303 1302 return renamed
1304 1303
1305 1304 if self.rev() == self.linkrev():
1306 1305 return renamed
1307 1306
1308 1307 name = self.path()
1309 1308 fnode = self._filenode
1310 1309 for p in self._changectx.parents():
1311 1310 try:
1312 1311 if fnode == p.filenode(name):
1313 1312 return None
1314 1313 except error.LookupError:
1315 1314 pass
1316 1315 return renamed
1317 1316
1318 1317 def children(self):
1319 1318 # hard for renames
1320 1319 c = self._filelog.children(self._filenode)
1321 1320 return [filectx(self._repo, self._path, fileid=x,
1322 1321 filelog=self._filelog) for x in c]
1323 1322
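A sketch of reading file data pinned to a revision through filectx, assuming `repo` from the first snippet and a hypothetical tracked path:

```python
# Sketch; 'somefile' is a placeholder for a tracked path.
fctx = repo['tip']['somefile']
print(fctx.data())                  # file contents at tip
print(fctx.size(), fctx.isbinary())
print(fctx.renamed())               # (oldpath, filenode), or false if not a copy
older = fctx.filectx(0)             # file revision 0 from the same filelog
```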
1324 1323 class committablectx(basectx):
1325 1324 """A committablectx object provides common functionality for a context that
1326 1325 wants the ability to commit, e.g. workingctx or memctx."""
1327 1326 def __init__(self, repo, text="", user=None, date=None, extra=None,
1328 1327 changes=None):
1329 1328 self._repo = repo
1330 1329 self._rev = None
1331 1330 self._node = None
1332 1331 self._text = text
1333 1332 if date:
1334 1333 self._date = util.parsedate(date)
1335 1334 if user:
1336 1335 self._user = user
1337 1336 if changes:
1338 1337 self._status = changes
1339 1338
1340 1339 self._extra = {}
1341 1340 if extra:
1342 1341 self._extra = extra.copy()
1343 1342 if 'branch' not in self._extra:
1344 1343 try:
1345 1344 branch = encoding.fromlocal(self._repo.dirstate.branch())
1346 1345 except UnicodeDecodeError:
1347 1346 raise error.Abort(_('branch name not in UTF-8!'))
1348 1347 self._extra['branch'] = branch
1349 1348 if self._extra['branch'] == '':
1350 1349 self._extra['branch'] = 'default'
1351 1350
1352 1351 def __bytes__(self):
1353 1352 return bytes(self._parents[0]) + "+"
1354 1353
1355 1354 __str__ = encoding.strmethod(__bytes__)
1356 1355
1357 1356 def __nonzero__(self):
1358 1357 return True
1359 1358
1360 1359 __bool__ = __nonzero__
1361 1360
1362 1361 def _buildflagfunc(self):
1363 1362 # Create a fallback function for getting file flags when the
1364 1363 # filesystem doesn't support them
1365 1364
1366 1365 copiesget = self._repo.dirstate.copies().get
1367 1366 parents = self.parents()
1368 1367 if len(parents) < 2:
1369 1368 # when we have one parent, it's easy: copy from parent
1370 1369 man = parents[0].manifest()
1371 1370 def func(f):
1372 1371 f = copiesget(f, f)
1373 1372 return man.flags(f)
1374 1373 else:
1375 1374 # merges are tricky: we try to reconstruct the unstored
1376 1375 # result from the merge (issue1802)
1377 1376 p1, p2 = parents
1378 1377 pa = p1.ancestor(p2)
1379 1378 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1380 1379
1381 1380 def func(f):
1382 1381 f = copiesget(f, f) # may be wrong for merges with copies
1383 1382 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1384 1383 if fl1 == fl2:
1385 1384 return fl1
1386 1385 if fl1 == fla:
1387 1386 return fl2
1388 1387 if fl2 == fla:
1389 1388 return fl1
1390 1389 return '' # punt for conflicts
1391 1390
1392 1391 return func
1393 1392
1394 1393 @propertycache
1395 1394 def _flagfunc(self):
1396 1395 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1397 1396
1398 1397 @propertycache
1399 1398 def _status(self):
1400 1399 return self._repo.status()
1401 1400
1402 1401 @propertycache
1403 1402 def _user(self):
1404 1403 return self._repo.ui.username()
1405 1404
1406 1405 @propertycache
1407 1406 def _date(self):
1408 1407 ui = self._repo.ui
1409 1408 date = ui.configdate('devel', 'default-date')
1410 1409 if date is None:
1411 1410 date = util.makedate()
1412 1411 return date
1413 1412
1414 1413 def subrev(self, subpath):
1415 1414 return None
1416 1415
1417 1416 def manifestnode(self):
1418 1417 return None
1419 1418 def user(self):
1420 1419 return self._user or self._repo.ui.username()
1421 1420 def date(self):
1422 1421 return self._date
1423 1422 def description(self):
1424 1423 return self._text
1425 1424 def files(self):
1426 1425 return sorted(self._status.modified + self._status.added +
1427 1426 self._status.removed)
1428 1427
1429 1428 def modified(self):
1430 1429 return self._status.modified
1431 1430 def added(self):
1432 1431 return self._status.added
1433 1432 def removed(self):
1434 1433 return self._status.removed
1435 1434 def deleted(self):
1436 1435 return self._status.deleted
1437 1436 def branch(self):
1438 1437 return encoding.tolocal(self._extra['branch'])
1439 1438 def closesbranch(self):
1440 1439 return 'close' in self._extra
1441 1440 def extra(self):
1442 1441 return self._extra
1443 1442
1444 1443 def isinmemory(self):
1445 1444 return False
1446 1445
1447 1446 def tags(self):
1448 1447 return []
1449 1448
1450 1449 def bookmarks(self):
1451 1450 b = []
1452 1451 for p in self.parents():
1453 1452 b.extend(p.bookmarks())
1454 1453 return b
1455 1454
1456 1455 def phase(self):
1457 1456 phase = phases.draft # default phase to draft
1458 1457 for p in self.parents():
1459 1458 phase = max(phase, p.phase())
1460 1459 return phase
1461 1460
1462 1461 def hidden(self):
1463 1462 return False
1464 1463
1465 1464 def children(self):
1466 1465 return []
1467 1466
1468 1467 def flags(self, path):
1469 1468 if r'_manifest' in self.__dict__:
1470 1469 try:
1471 1470 return self._manifest.flags(path)
1472 1471 except KeyError:
1473 1472 return ''
1474 1473
1475 1474 try:
1476 1475 return self._flagfunc(path)
1477 1476 except OSError:
1478 1477 return ''
1479 1478
1480 1479 def ancestor(self, c2):
1481 1480 """return the "best" ancestor context of self and c2"""
1482 1481 return self._parents[0].ancestor(c2) # punt on two parents for now
1483 1482
1484 1483 def walk(self, match):
1485 1484 '''Generates matching file names.'''
1486 1485 return sorted(self._repo.dirstate.walk(match,
1487 1486 subrepos=sorted(self.substate),
1488 1487 unknown=True, ignored=False))
1489 1488
1490 1489 def matches(self, match):
1491 1490 return sorted(self._repo.dirstate.matches(match))
1492 1491
1493 1492 def ancestors(self):
1494 1493 for p in self._parents:
1495 1494 yield p
1496 1495 for a in self._repo.changelog.ancestors(
1497 1496 [p.rev() for p in self._parents]):
1498 1497 yield changectx(self._repo, a)
1499 1498
1500 1499 def markcommitted(self, node):
1501 1500 """Perform post-commit cleanup necessary after committing this ctx
1502 1501
1503 1502 Specifically, this updates backing stores this working context
1504 1503 wraps to reflect the fact that the changes reflected by this
1505 1504 workingctx have been committed. For example, it marks
1506 1505 modified and added files as normal in the dirstate.
1507 1506
1508 1507 """
1509 1508
1510 1509 with self._repo.dirstate.parentchange():
1511 1510 for f in self.modified() + self.added():
1512 1511 self._repo.dirstate.normal(f)
1513 1512 for f in self.removed():
1514 1513 self._repo.dirstate.drop(f)
1515 1514 self._repo.dirstate.setparents(node)
1516 1515
1517 1516 # write changes out explicitly, because nesting wlock at
1518 1517 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1519 1518 # from immediately doing so for subsequent changing files
1520 1519 self._repo.dirstate.write(self._repo.currenttransaction())
1521 1520
1522 1521 def dirty(self, missing=False, merge=True, branch=True):
1523 1522 return False
1524 1523
1525 1524 class workingctx(committablectx):
1526 1525 """A workingctx object makes access to data related to
1527 1526 the current working directory convenient.
1528 1527 date - any valid date string or (unixtime, offset), or None.
1529 1528 user - username string, or None.
1530 1529 extra - a dictionary of extra values, or None.
1531 1530 changes - a list of file lists as returned by localrepo.status()
1532 1531 or None to use the repository status.
1533 1532 """
1534 1533 def __init__(self, repo, text="", user=None, date=None, extra=None,
1535 1534 changes=None):
1536 1535 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1537 1536
1538 1537 def __iter__(self):
1539 1538 d = self._repo.dirstate
1540 1539 for f in d:
1541 1540 if d[f] != 'r':
1542 1541 yield f
1543 1542
1544 1543 def __contains__(self, key):
1545 1544 return self._repo.dirstate[key] not in "?r"
1546 1545
1547 1546 def hex(self):
1548 1547 return hex(wdirid)
1549 1548
1550 1549 @propertycache
1551 1550 def _parents(self):
1552 1551 p = self._repo.dirstate.parents()
1553 1552 if p[1] == nullid:
1554 1553 p = p[:-1]
1555 1554 return [changectx(self._repo, x) for x in p]
1556 1555
1557 1556 def filectx(self, path, filelog=None):
1558 1557 """get a file context from the working directory"""
1559 1558 return workingfilectx(self._repo, path, workingctx=self,
1560 1559 filelog=filelog)
1561 1560
1562 1561 def dirty(self, missing=False, merge=True, branch=True):
1563 1562 "check whether a working directory is modified"
1564 1563 # check subrepos first
1565 1564 for s in sorted(self.substate):
1566 1565 if self.sub(s).dirty(missing=missing):
1567 1566 return True
1568 1567 # check current working dir
1569 1568 return ((merge and self.p2()) or
1570 1569 (branch and self.branch() != self.p1().branch()) or
1571 1570 self.modified() or self.added() or self.removed() or
1572 1571 (missing and self.deleted()))
1573 1572
1574 1573 def add(self, list, prefix=""):
1575 1574 with self._repo.wlock():
1576 1575 ui, ds = self._repo.ui, self._repo.dirstate
1577 1576 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1578 1577 rejected = []
1579 1578 lstat = self._repo.wvfs.lstat
1580 1579 for f in list:
1581 1580 # ds.pathto() returns an absolute path when this is invoked from
1582 1581 # the keyword extension. That gets flagged as non-portable on
1583 1582 # Windows, since it contains the drive letter and colon.
1584 1583 scmutil.checkportable(ui, os.path.join(prefix, f))
1585 1584 try:
1586 1585 st = lstat(f)
1587 1586 except OSError:
1588 1587 ui.warn(_("%s does not exist!\n") % uipath(f))
1589 1588 rejected.append(f)
1590 1589 continue
1591 1590 if st.st_size > 10000000:
1592 1591 ui.warn(_("%s: up to %d MB of RAM may be required "
1593 1592 "to manage this file\n"
1594 1593 "(use 'hg revert %s' to cancel the "
1595 1594 "pending addition)\n")
1596 1595 % (f, 3 * st.st_size // 1000000, uipath(f)))
1597 1596 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1598 1597 ui.warn(_("%s not added: only files and symlinks "
1599 1598 "supported currently\n") % uipath(f))
1600 1599 rejected.append(f)
1601 1600 elif ds[f] in 'amn':
1602 1601 ui.warn(_("%s already tracked!\n") % uipath(f))
1603 1602 elif ds[f] == 'r':
1604 1603 ds.normallookup(f)
1605 1604 else:
1606 1605 ds.add(f)
1607 1606 return rejected
1608 1607
1609 1608 def forget(self, files, prefix=""):
1610 1609 with self._repo.wlock():
1611 1610 ds = self._repo.dirstate
1612 1611 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1613 1612 rejected = []
1614 1613 for f in files:
1615 1614 if f not in self._repo.dirstate:
1616 1615 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1617 1616 rejected.append(f)
1618 1617 elif self._repo.dirstate[f] != 'a':
1619 1618 self._repo.dirstate.remove(f)
1620 1619 else:
1621 1620 self._repo.dirstate.drop(f)
1622 1621 return rejected
1623 1622
1624 1623 def undelete(self, list):
1625 1624 pctxs = self.parents()
1626 1625 with self._repo.wlock():
1627 1626 ds = self._repo.dirstate
1628 1627 for f in list:
1629 1628 if self._repo.dirstate[f] != 'r':
1630 1629 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1631 1630 else:
1632 1631 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1633 1632 t = fctx.data()
1634 1633 self._repo.wwrite(f, t, fctx.flags())
1635 1634 self._repo.dirstate.normal(f)
1636 1635
1637 1636 def copy(self, source, dest):
1638 1637 try:
1639 1638 st = self._repo.wvfs.lstat(dest)
1640 1639 except OSError as err:
1641 1640 if err.errno != errno.ENOENT:
1642 1641 raise
1643 1642 self._repo.ui.warn(_("%s does not exist!\n")
1644 1643 % self._repo.dirstate.pathto(dest))
1645 1644 return
1646 1645 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1647 1646 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1648 1647 "symbolic link\n")
1649 1648 % self._repo.dirstate.pathto(dest))
1650 1649 else:
1651 1650 with self._repo.wlock():
1652 1651 if self._repo.dirstate[dest] in '?':
1653 1652 self._repo.dirstate.add(dest)
1654 1653 elif self._repo.dirstate[dest] in 'r':
1655 1654 self._repo.dirstate.normallookup(dest)
1656 1655 self._repo.dirstate.copy(source, dest)
1657 1656
1658 1657 def match(self, pats=None, include=None, exclude=None, default='glob',
1659 1658 listsubrepos=False, badfn=None):
1660 1659 r = self._repo
1661 1660
1662 1661 # Only a case insensitive filesystem needs magic to translate user input
1663 1662 # to actual case in the filesystem.
1664 1663 icasefs = not util.fscasesensitive(r.root)
1665 1664 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1666 1665 default, auditor=r.auditor, ctx=self,
1667 1666 listsubrepos=listsubrepos, badfn=badfn,
1668 1667 icasefs=icasefs)
1669 1668
1670 1669 def _filtersuspectsymlink(self, files):
1671 1670 if not files or self._repo.dirstate._checklink:
1672 1671 return files
1673 1672
1674 1673 # Symlink placeholders may get non-symlink-like contents
1675 1674 # via user error or dereferencing by NFS or Samba servers,
1676 1675 # so we filter out any placeholders that don't look like a
1677 1676 # symlink
1678 1677 sane = []
1679 1678 for f in files:
1680 1679 if self.flags(f) == 'l':
1681 1680 d = self[f].data()
1682 1681 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1683 1682 self._repo.ui.debug('ignoring suspect symlink placeholder'
1684 1683 ' "%s"\n' % f)
1685 1684 continue
1686 1685 sane.append(f)
1687 1686 return sane
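    # Illustrative sketch of the heuristic above (hypothetical contents,
    # not part of the original module): a sane placeholder is a short,
    # single-line, non-binary target path.
    #
    #   'dest/real-file'     -> kept, looks like a symlink target
    #   ''                   -> filtered, empty placeholder
    #   'line1\nline2'       -> filtered, contains a newline
    #   1024 bytes or more   -> filtered, too long for a placeholder
    #   binary data          -> filtered by util.binary()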
1688 1687
1689 1688 def _checklookup(self, files):
1690 1689 # check for any possibly clean files
1691 1690 if not files:
1692 1691 return [], [], []
1693 1692
1694 1693 modified = []
1695 1694 deleted = []
1696 1695 fixup = []
1697 1696 pctx = self._parents[0]
1698 1697 # do a full compare of any files that might have changed
1699 1698 for f in sorted(files):
1700 1699 try:
1701 1700 # This will return True for a file that got replaced by a
1702 1701 # directory in the interim, but fixing that is pretty hard.
1703 1702 if (f not in pctx or self.flags(f) != pctx.flags(f)
1704 1703 or pctx[f].cmp(self[f])):
1705 1704 modified.append(f)
1706 1705 else:
1707 1706 fixup.append(f)
1708 1707 except (IOError, OSError):
1709 1708 # A file became inaccessible in between? Mark it as deleted,
1710 1709 # matching dirstate behavior (issue5584).
1711 1710 # The dirstate has more complex behavior around whether a
1712 1711 # missing file matches a directory, etc, but we don't need to
1713 1712 # bother with that: if f has made it to this point, we're sure
1714 1713 # it's in the dirstate.
1715 1714 deleted.append(f)
1716 1715
1717 1716 return modified, deleted, fixup
1718 1717
1719 1718 def _poststatusfixup(self, status, fixup):
1720 1719 """update dirstate for files that are actually clean"""
1721 1720 poststatus = self._repo.postdsstatus()
1722 1721 if fixup or poststatus:
1723 1722 try:
1724 1723 oldid = self._repo.dirstate.identity()
1725 1724
1726 1725 # updating the dirstate is optional
1727 1726 # so we don't wait on the lock
1728 1727 # wlock can invalidate the dirstate, so cache normal _after_
1729 1728 # taking the lock
1730 1729 with self._repo.wlock(False):
1731 1730 if self._repo.dirstate.identity() == oldid:
1732 1731 if fixup:
1733 1732 normal = self._repo.dirstate.normal
1734 1733 for f in fixup:
1735 1734 normal(f)
1736 1735 # write changes out explicitly, because nesting
1737 1736 # wlock at runtime may prevent 'wlock.release()'
1738 1737 # after this block from doing so for subsequent
1739 1738 # changing files
1740 1739 tr = self._repo.currenttransaction()
1741 1740 self._repo.dirstate.write(tr)
1742 1741
1743 1742 if poststatus:
1744 1743 for ps in poststatus:
1745 1744 ps(self, status)
1746 1745 else:
1747 1746 # in this case, writing changes out breaks
1748 1747 # consistency, because .hg/dirstate was
1749 1748 # already changed simultaneously after last
1750 1749 # caching (see also issue5584 for detail)
1751 1750 self._repo.ui.debug('skip updating dirstate: '
1752 1751 'identity mismatch\n')
1753 1752 except error.LockError:
1754 1753 pass
1755 1754 finally:
1756 1755 # Even if the wlock couldn't be grabbed, clear out the list.
1757 1756 self._repo.clearpostdsstatus()
1758 1757
1759 1758 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1760 1759 '''Gets the status from the dirstate -- internal use only.'''
1761 1760 subrepos = []
1762 1761 if '.hgsub' in self:
1763 1762 subrepos = sorted(self.substate)
1764 1763 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1765 1764 clean=clean, unknown=unknown)
1766 1765
1767 1766 # check for any possibly clean files
1768 1767 fixup = []
1769 1768 if cmp:
1770 1769 modified2, deleted2, fixup = self._checklookup(cmp)
1771 1770 s.modified.extend(modified2)
1772 1771 s.deleted.extend(deleted2)
1773 1772
1774 1773 if fixup and clean:
1775 1774 s.clean.extend(fixup)
1776 1775
1777 1776 self._poststatusfixup(s, fixup)
1778 1777
1779 1778 if match.always():
1780 1779 # cache for performance
1781 1780 if s.unknown or s.ignored or s.clean:
1782 1781 # "_status" is cached with list*=False in the normal route
1783 1782 self._status = scmutil.status(s.modified, s.added, s.removed,
1784 1783 s.deleted, [], [], [])
1785 1784 else:
1786 1785 self._status = s
1787 1786
1788 1787 return s
1789 1788
1790 1789 @propertycache
1791 1790 def _manifest(self):
1792 1791 """generate a manifest corresponding to the values in self._status
1793 1792
1794 1793 This reuses the file nodeids from the parents, but uses special node
1795 1794 identifiers for added and modified files. This is used by manifest
1796 1795 merge to see that files are different and by update logic to avoid
1797 1796 deleting newly added files.
1798 1797 """
1799 1798 return self._buildstatusmanifest(self._status)
1800 1799
1801 1800 def _buildstatusmanifest(self, status):
1802 1801 """Builds a manifest that includes the given status results."""
1803 1802 parents = self.parents()
1804 1803
1805 1804 man = parents[0].manifest().copy()
1806 1805
1807 1806 ff = self._flagfunc
1808 1807 for i, l in ((addednodeid, status.added),
1809 1808 (modifiednodeid, status.modified)):
1810 1809 for f in l:
1811 1810 man[f] = i
1812 1811 try:
1813 1812 man.setflag(f, ff(f))
1814 1813 except OSError:
1815 1814 pass
1816 1815
1817 1816 for f in status.deleted + status.removed:
1818 1817 if f in man:
1819 1818 del man[f]
1820 1819
1821 1820 return man
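    # Worked example (hypothetical file names, not part of the original
    # module) of what the method above produces:
    #
    #   s = scmutil.status(['m'], ['a'], ['r'], [], [], [], [])
    #   man = self._buildstatusmanifest(s)
    #   # man['a'] == addednodeid, man['m'] == modifiednodeid,
    #   # 'r' is no longer in man; clean files keep their parent nodeids.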
1822 1821
1823 1822 def _buildstatus(self, other, s, match, listignored, listclean,
1824 1823 listunknown):
1825 1824 """build a status with respect to another context
1826 1825
1827 1826 This includes logic for maintaining the fast path of status when
1828 1827 comparing the working directory against its parent, which is to skip
1829 1828 building a new manifest if self (working directory) is not comparing
1830 1829 against its parent (repo['.']).
1831 1830 """
1832 1831 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1833 1832 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1834 1833 # might have accidentally ended up with the entire contents of the file
1835 1834 # they are supposed to be linking to.
1836 1835 s.modified[:] = self._filtersuspectsymlink(s.modified)
1837 1836 if other != self._repo['.']:
1838 1837 s = super(workingctx, self)._buildstatus(other, s, match,
1839 1838 listignored, listclean,
1840 1839 listunknown)
1841 1840 return s
1842 1841
1843 1842 def _matchstatus(self, other, match):
1844 1843 """override the match method with a filter for directory patterns
1845 1844
1846 1845 We use inheritance to customize the match.bad method only in the case
1847 1846 of workingctx, since it applies only to the working directory when
1848 1847 comparing against the parent changeset.
1849 1848
1850 1849 If we aren't comparing against the working directory's parent, then we
1851 1850 just use the default match object sent to us.
1852 1851 """
1853 1852 if other != self._repo['.']:
1854 1853 def bad(f, msg):
1855 1854 # 'f' may be a directory pattern from 'match.files()',
1856 1855 # so 'f not in ctx1' is not enough
1857 1856 if f not in other and not other.hasdir(f):
1858 1857 self._repo.ui.warn('%s: %s\n' %
1859 1858 (self._repo.dirstate.pathto(f), msg))
1860 1859 match.bad = bad
1861 1860 return match
1862 1861
1863 1862 def markcommitted(self, node):
1864 1863 super(workingctx, self).markcommitted(node)
1865 1864
1866 1865 sparse.aftercommit(self._repo, node)
1867 1866
1868 1867 class committablefilectx(basefilectx):
1869 1868 """A committablefilectx provides common functionality for a file context
1870 1869 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1871 1870 def __init__(self, repo, path, filelog=None, ctx=None):
1872 1871 self._repo = repo
1873 1872 self._path = path
1874 1873 self._changeid = None
1875 1874 self._filerev = self._filenode = None
1876 1875
1877 1876 if filelog is not None:
1878 1877 self._filelog = filelog
1879 1878 if ctx:
1880 1879 self._changectx = ctx
1881 1880
1882 1881 def __nonzero__(self):
1883 1882 return True
1884 1883
1885 1884 __bool__ = __nonzero__
1886 1885
1887 1886 def linkrev(self):
1888 1887 # linked to self._changectx no matter if file is modified or not
1889 1888 return self.rev()
1890 1889
1891 1890 def parents(self):
1892 1891 '''return parent filectxs, following copies if necessary'''
1893 1892 def filenode(ctx, path):
1894 1893 return ctx._manifest.get(path, nullid)
1895 1894
1896 1895 path = self._path
1897 1896 fl = self._filelog
1898 1897 pcl = self._changectx._parents
1899 1898 renamed = self.renamed()
1900 1899
1901 1900 if renamed:
1902 1901 pl = [renamed + (None,)]
1903 1902 else:
1904 1903 pl = [(path, filenode(pcl[0], path), fl)]
1905 1904
1906 1905 for pc in pcl[1:]:
1907 1906 pl.append((path, filenode(pc, path), fl))
1908 1907
1909 1908 return [self._parentfilectx(p, fileid=n, filelog=l)
1910 1909 for p, n, l in pl if n != nullid]
1911 1910
1912 1911 def children(self):
1913 1912 return []
1914 1913
1915 1914 class workingfilectx(committablefilectx):
1916 1915 """A workingfilectx object makes access to data related to a particular
1917 1916 file in the working directory convenient."""
1918 1917 def __init__(self, repo, path, filelog=None, workingctx=None):
1919 1918 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1920 1919
1921 1920 @propertycache
1922 1921 def _changectx(self):
1923 1922 return workingctx(self._repo)
1924 1923
1925 1924 def data(self):
1926 1925 return self._repo.wread(self._path)
1927 1926 def renamed(self):
1928 1927 rp = self._repo.dirstate.copied(self._path)
1929 1928 if not rp:
1930 1929 return None
1931 1930 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1932 1931
1933 1932 def size(self):
1934 1933 return self._repo.wvfs.lstat(self._path).st_size
1935 1934 def date(self):
1936 1935 t, tz = self._changectx.date()
1937 1936 try:
1938 1937 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1939 1938 except OSError as err:
1940 1939 if err.errno != errno.ENOENT:
1941 1940 raise
1942 1941 return (t, tz)
1943 1942
1944 1943 def exists(self):
1945 1944 return self._repo.wvfs.exists(self._path)
1946 1945
1947 1946 def lexists(self):
1948 1947 return self._repo.wvfs.lexists(self._path)
1949 1948
1950 1949 def audit(self):
1951 1950 return self._repo.wvfs.audit(self._path)
1952 1951
1953 1952 def cmp(self, fctx):
1954 1953 """compare with other file context
1955 1954
1956 1955 returns True if different from fctx.
1957 1956 """
1958 1957 # fctx should be a filectx (not a workingfilectx)
1959 1958 # invert comparison to reuse the same code path
1960 1959 return fctx.cmp(self)
1961 1960
1962 1961 def remove(self, ignoremissing=False):
1963 1962 """wraps unlink for a repo's working directory"""
1964 1963 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1965 1964
1966 1965 def write(self, data, flags, backgroundclose=False):
1967 1966 """wraps repo.wwrite"""
1968 1967 self._repo.wwrite(self._path, data, flags,
1969 1968 backgroundclose=backgroundclose)
1970 1969
1971 1970 def markcopied(self, src):
1972 1971 """marks this file a copy of `src`"""
1973 1972 if self._repo.dirstate[self._path] in "nma":
1974 1973 self._repo.dirstate.copy(src, self._path)
1975 1974
1976 1975 def clearunknown(self):
1977 1976 """Removes conflicting items in the working directory so that
1978 1977 ``write()`` can be called successfully.
1979 1978 """
1980 1979 wvfs = self._repo.wvfs
1981 1980 f = self._path
1982 1981 wvfs.audit(f)
1983 1982 if wvfs.isdir(f) and not wvfs.islink(f):
1984 1983 wvfs.rmtree(f, forcibly=True)
1985 1984 for p in reversed(list(util.finddirs(f))):
1986 1985 if wvfs.isfileorlink(p):
1987 1986 wvfs.unlink(p)
1988 1987 break
1989 1988
1990 1989 def setflags(self, l, x):
1991 1990 self._repo.wvfs.setflags(self._path, l, x)
1992 1991
1993 1992 class overlayworkingctx(committablectx):
1994 1993 """Wraps another mutable context with a write-back cache that can be
1995 1994 converted into a commit context.
1996 1995
1997 1996 self._cache[path] maps to a dict with keys: {
1998 1997 'exists': bool?
1999 1998 'date': date?
2000 1999 'data': str?
2001 2000 'flags': str?
2002 2001 'copied': str? (path or None)
2003 2002 }
2004 2003 If `exists` is True, `flags` must be non-None and `date` is non-None. If it
2005 2004 is `False`, the file was deleted.
2006 2005 """
2007 2006
2008 2007 def __init__(self, repo):
2009 2008 super(overlayworkingctx, self).__init__(repo)
2010 2009 self._repo = repo
2011 2010 self.clean()
2012 2011
2013 2012 def setbase(self, wrappedctx):
2014 2013 self._wrappedctx = wrappedctx
2015 2014 self._parents = [wrappedctx]
2016 2015 # Drop old manifest cache as it is now out of date.
2017 2016 # This is necessary when, e.g., rebasing several nodes with one
2018 2017 # ``overlayworkingctx`` (e.g. with --collapse).
2019 2018 util.clearcachedproperty(self, '_manifest')
2020 2019
2021 2020 def data(self, path):
2022 2021 if self.isdirty(path):
2023 2022 if self._cache[path]['exists']:
2024 2023 if self._cache[path]['data']:
2025 2024 return self._cache[path]['data']
2026 2025 else:
2027 2026 # Must fallback here, too, because we only set flags.
2028 2027 return self._wrappedctx[path].data()
2029 2028 else:
2030 2029 raise error.ProgrammingError("No such file or directory: %s" %
2031 2030 path)
2032 2031 else:
2033 2032 return self._wrappedctx[path].data()
2034 2033
2035 2034 @propertycache
2036 2035 def _manifest(self):
2037 2036 parents = self.parents()
2038 2037 man = parents[0].manifest().copy()
2039 2038
2040 2039 flag = self._flagfunc
2041 2040 for path in self.added():
2042 2041 man[path] = addednodeid
2043 2042 man.setflag(path, flag(path))
2044 2043 for path in self.modified():
2045 2044 man[path] = modifiednodeid
2046 2045 man.setflag(path, flag(path))
2047 2046 for path in self.removed():
2048 2047 del man[path]
2049 2048 return man
2050 2049
2051 2050 @propertycache
2052 2051 def _flagfunc(self):
2053 2052 def f(path):
2054 2053 return self._cache[path]['flags']
2055 2054 return f
2056 2055
2057 2056 def files(self):
2058 2057 return sorted(self.added() + self.modified() + self.removed())
2059 2058
2060 2059 def modified(self):
2061 2060 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
2062 2061 self._existsinparent(f)]
2063 2062
2064 2063 def added(self):
2065 2064 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
2066 2065 not self._existsinparent(f)]
2067 2066
2068 2067 def removed(self):
2069 2068 return [f for f in self._cache.keys() if
2070 2069 not self._cache[f]['exists'] and self._existsinparent(f)]
2071 2070
2072 2071 def isinmemory(self):
2073 2072 return True
2074 2073
2075 2074 def filedate(self, path):
2076 2075 if self.isdirty(path):
2077 2076 return self._cache[path]['date']
2078 2077 else:
2079 2078 return self._wrappedctx[path].date()
2080 2079
2081 2080 def markcopied(self, path, origin):
2082 2081 if self.isdirty(path):
2083 2082 self._cache[path]['copied'] = origin
2084 2083 else:
2085 2084 raise error.ProgrammingError('markcopied() called on clean context')
2086 2085
2087 2086 def copydata(self, path):
2088 2087 if self.isdirty(path):
2089 2088 return self._cache[path]['copied']
2090 2089 else:
2091 2090 raise error.ProgrammingError('copydata() called on clean context')
2092 2091
2093 2092 def flags(self, path):
2094 2093 if self.isdirty(path):
2095 2094 if self._cache[path]['exists']:
2096 2095 return self._cache[path]['flags']
2097 2096 else:
2098 2097 raise error.ProgrammingError("No such file or directory: %s" %
2099 2098 path)
2100 2099 else:
2101 2100 return self._wrappedctx[path].flags()
2102 2101
2103 2102 def _existsinparent(self, path):
2104 2103 try:
2105 2104 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
2106 2105 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2107 2106 # with an ``exists()`` function.
2108 2107 self._wrappedctx[path]
2109 2108 return True
2110 2109 except error.ManifestLookupError:
2111 2110 return False
2112 2111
2113 2112 def _auditconflicts(self, path):
2114 2113 """Replicates conflict checks done by wvfs.write().
2115 2114
2116 2115 Since we never write to the filesystem and never call `applyupdates` in
2117 2116 IMM, we'll never check that a path is actually writable -- e.g., because
2118 2117 it adds `a/foo`, but `a` is actually a file in the other commit.
2119 2118 """
2120 2119 def fail(path, component):
2121 2120 # p1() is the base and we're receiving "writes" for p2()'s
2122 2121 # files.
2123 2122 if 'l' in self.p1()[component].flags():
2124 2123 raise error.Abort("error: %s conflicts with symlink %s "
2125 2124 "in %s." % (path, component,
2126 2125 self.p1().rev()))
2127 2126 else:
2128 2127 raise error.Abort("error: '%s' conflicts with file '%s' in "
2129 2128 "%s." % (path, component,
2130 2129 self.p1().rev()))
2131 2130
2132 2131 # Test that each new directory to be created to write this path from p2
2133 2132 # is not a file in p1.
2134 2133 components = path.split('/')
2135 2134 for i in xrange(len(components)):
2136 2135 component = "/".join(components[0:i])
2137 2136 if component in self.p1():
2138 2137 fail(path, component)
2139 2138
2140 2139 # Test the other direction -- that this path from p2 isn't a directory
2141 2140 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2142 2141 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
2143 2142 matches = self.p1().manifest().matches(match)
2144 2143 if len(matches) > 0:
2145 2144 if len(matches) == 1 and matches.keys()[0] == path:
2146 2145 return
2147 2146 raise error.Abort("error: file '%s' cannot be written because "
2148 2147 " '%s/' is a folder in %s (containing %d "
2149 2148 "entries: %s)"
2150 2149 % (path, path, self.p1(), len(matches),
2151 2150 ', '.join(matches.keys())))
2152 2151
2153 2152 def write(self, path, data, flags=''):
2154 2153 if data is None:
2155 2154 raise error.ProgrammingError("data must be non-None")
2156 2155 self._auditconflicts(path)
2157 2156 self._markdirty(path, exists=True, data=data, date=util.makedate(),
2158 2157 flags=flags)
2159 2158
2160 2159 def setflags(self, path, l, x):
2161 2160 self._markdirty(path, exists=True, date=util.makedate(),
2162 2161 flags=(l and 'l' or '') + (x and 'x' or ''))
2163 2162
2164 2163 def remove(self, path):
2165 2164 self._markdirty(path, exists=False)
2166 2165
2167 2166 def exists(self, path):
2168 2167 """exists behaves like `lexists`, but needs to follow symlinks and
2169 2168 return False if they are broken.
2170 2169 """
2171 2170 if self.isdirty(path):
2172 2171 # If this path exists and is a symlink, "follow" it by calling
2173 2172 # exists on the destination path.
2174 2173 if (self._cache[path]['exists'] and
2175 2174 'l' in self._cache[path]['flags']):
2176 2175 return self.exists(self._cache[path]['data'].strip())
2177 2176 else:
2178 2177 return self._cache[path]['exists']
2179 2178
2180 2179 return self._existsinparent(path)
2181 2180
2182 2181 def lexists(self, path):
2183 2182 """lexists returns True if the path exists"""
2184 2183 if self.isdirty(path):
2185 2184 return self._cache[path]['exists']
2186 2185
2187 2186 return self._existsinparent(path)
2188 2187
2189 2188 def size(self, path):
2190 2189 if self.isdirty(path):
2191 2190 if self._cache[path]['exists']:
2192 2191 return len(self._cache[path]['data'])
2193 2192 else:
2194 2193 raise error.ProgrammingError("No such file or directory: %s" %
2195 2194 path)
2196 2195 return self._wrappedctx[path].size()
2197 2196
2198 2197 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
2199 2198 user=None, editor=None):
2200 2199 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2201 2200 committed.
2202 2201
2203 2202 ``text`` is the commit message.
2204 2203 ``parents`` (optional) are rev numbers.
2205 2204 """
2206 2205 # Default parents to the wrapped contexts' if not passed.
2207 2206 if parents is None:
2208 2207 parents = self._wrappedctx.parents()
2209 2208 if len(parents) == 1:
2210 2209 parents = (parents[0], None)
2211 2210
2212 2211 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2213 2212 if parents[1] is None:
2214 2213 parents = (self._repo[parents[0]], None)
2215 2214 else:
2216 2215 parents = (self._repo[parents[0]], self._repo[parents[1]])
2217 2216
2218 2217 files = self._cache.keys()
2219 2218 def getfile(repo, memctx, path):
2220 2219 if self._cache[path]['exists']:
2221 2220 return memfilectx(repo, memctx, path,
2222 2221 self._cache[path]['data'],
2223 2222 'l' in self._cache[path]['flags'],
2224 2223 'x' in self._cache[path]['flags'],
2225 2224 self._cache[path]['copied'])
2226 2225 else:
2227 2226 # Returning None, but including the path in `files`, is
2228 2227 # necessary for memctx to register a deletion.
2229 2228 return None
2230 2229 return memctx(self._repo, parents, text, files, getfile, date=date,
2231 2230 extra=extra, user=user, branch=branch, editor=editor)
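    # Hedged usage sketch (assumed call sites, not part of the original
    # module): an in-memory rebase of a single file change would do
    # roughly
    #
    #   wctx = overlayworkingctx(repo)
    #   wctx.setbase(repo['.'])
    #   wctx.write('foo', 'new contents\n')
    #   mctx = wctx.tomemctx('rebased commit message')
    #   node = mctx.commit()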
2232 2231
2233 2232 def isdirty(self, path):
2234 2233 return path in self._cache
2235 2234
2236 2235 def isempty(self):
2237 2236 # We need to discard any keys that are actually clean before the empty
2238 2237 # commit check.
2239 2238 self._compact()
2240 2239 return len(self._cache) == 0
2241 2240
2242 2241 def clean(self):
2243 2242 self._cache = {}
2244 2243
2245 2244 def _compact(self):
2246 2245 """Removes keys from the cache that are actually clean, by comparing
2247 2246 them with the underlying context.
2248 2247
2249 2248 This can occur during the merge process, e.g. by passing --tool :local
2250 2249 to resolve a conflict.
2251 2250 """
2252 2251 keys = []
2253 2252 for path in self._cache.keys():
2254 2253 cache = self._cache[path]
2255 2254 try:
2256 2255 underlying = self._wrappedctx[path]
2257 2256 if (underlying.data() == cache['data'] and
2258 2257 underlying.flags() == cache['flags']):
2259 2258 keys.append(path)
2260 2259 except error.ManifestLookupError:
2261 2260 # Path not in the underlying manifest (created).
2262 2261 continue
2263 2262
2264 2263 for path in keys:
2265 2264 del self._cache[path]
2266 2265 return keys
2267 2266
2268 2267 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2269 2268 self._cache[path] = {
2270 2269 'exists': exists,
2271 2270 'data': data,
2272 2271 'date': date,
2273 2272 'flags': flags,
2274 2273 'copied': None,
2275 2274 }
2276 2275
2277 2276 def filectx(self, path, filelog=None):
2278 2277 return overlayworkingfilectx(self._repo, path, parent=self,
2279 2278 filelog=filelog)
2280 2279
2281 2280 class overlayworkingfilectx(committablefilectx):
2282 2281 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2283 2282 cache, which can be flushed through later by calling ``flush()``."""
2284 2283
2285 2284 def __init__(self, repo, path, filelog=None, parent=None):
2286 2285 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2287 2286 parent)
2288 2287 self._repo = repo
2289 2288 self._parent = parent
2290 2289 self._path = path
2291 2290
2292 2291 def cmp(self, fctx):
2293 2292 return self.data() != fctx.data()
2294 2293
2295 2294 def changectx(self):
2296 2295 return self._parent
2297 2296
2298 2297 def data(self):
2299 2298 return self._parent.data(self._path)
2300 2299
2301 2300 def date(self):
2302 2301 return self._parent.filedate(self._path)
2303 2302
2304 2303 def exists(self):
2305 2304 return self.lexists()
2306 2305
2307 2306 def lexists(self):
2308 2307 return self._parent.exists(self._path)
2309 2308
2310 2309 def renamed(self):
2311 2310 path = self._parent.copydata(self._path)
2312 2311 if not path:
2313 2312 return None
2314 2313 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2315 2314
2316 2315 def size(self):
2317 2316 return self._parent.size(self._path)
2318 2317
2319 2318 def markcopied(self, origin):
2320 2319 self._parent.markcopied(self._path, origin)
2321 2320
2322 2321 def audit(self):
2323 2322 pass
2324 2323
2325 2324 def flags(self):
2326 2325 return self._parent.flags(self._path)
2327 2326
2328 2327 def setflags(self, islink, isexec):
2329 2328 return self._parent.setflags(self._path, islink, isexec)
2330 2329
2331 2330 def write(self, data, flags, backgroundclose=False):
2332 2331 return self._parent.write(self._path, data, flags)
2333 2332
2334 2333 def remove(self, ignoremissing=False):
2335 2334 return self._parent.remove(self._path)
2336 2335
2337 2336 def clearunknown(self):
2338 2337 pass
2339 2338
2340 2339 class workingcommitctx(workingctx):
2341 2340 """A workingcommitctx object makes access to data related to
2342 2341 the revision being committed convenient.
2343 2342
2344 2343 This hides changes in the working directory, if they aren't
2345 2344 committed in this context.
2346 2345 """
2347 2346 def __init__(self, repo, changes,
2348 2347 text="", user=None, date=None, extra=None):
2349 2348 super(workingctx, self).__init__(repo, text, user, date, extra,
2350 2349 changes)
2351 2350
2352 2351 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2353 2352 """Return matched files only in ``self._status``
2354 2353
2355 2354 Uncommitted files appear "clean" via this context, even if
2356 2355 they aren't actually so in the working directory.
2357 2356 """
2358 2357 if clean:
2359 2358 clean = [f for f in self._manifest if f not in self._changedset]
2360 2359 else:
2361 2360 clean = []
2362 2361 return scmutil.status([f for f in self._status.modified if match(f)],
2363 2362 [f for f in self._status.added if match(f)],
2364 2363 [f for f in self._status.removed if match(f)],
2365 2364 [], [], [], clean)
2366 2365
2367 2366 @propertycache
2368 2367 def _changedset(self):
2369 2368 """Return the set of files changed in this context
2370 2369 """
2371 2370 changed = set(self._status.modified)
2372 2371 changed.update(self._status.added)
2373 2372 changed.update(self._status.removed)
2374 2373 return changed
2375 2374
2376 2375 def makecachingfilectxfn(func):
2377 2376 """Create a filectxfn that caches based on the path.
2378 2377
2379 2378 We can't use util.cachefunc because it uses all arguments as the cache
2380 2379 key and this creates a cycle since the arguments include the repo and
2381 2380 memctx.
2382 2381 """
2383 2382 cache = {}
2384 2383
2385 2384 def getfilectx(repo, memctx, path):
2386 2385 if path not in cache:
2387 2386 cache[path] = func(repo, memctx, path)
2388 2387 return cache[path]
2389 2388
2390 2389 return getfilectx
2391 2390
2392 2391 def memfilefromctx(ctx):
2393 2392 """Given a context return a memfilectx for ctx[path]
2394 2393
2395 2394 This is a convenience method for building a memctx based on another
2396 2395 context.
2397 2396 """
2398 2397 def getfilectx(repo, memctx, path):
2399 2398 fctx = ctx[path]
2400 2399 # this is weird but apparently we only keep track of one parent
2401 2400 # (why not only store that instead of a tuple?)
2402 2401 copied = fctx.renamed()
2403 2402 if copied:
2404 2403 copied = copied[0]
2405 2404 return memfilectx(repo, memctx, path, fctx.data(),
2406 2405 islink=fctx.islink(), isexec=fctx.isexec(),
2407 2406 copied=copied)
2408 2407
2409 2408 return getfilectx
2410 2409
2411 2410 def memfilefrompatch(patchstore):
2412 2411 """Given a patch (e.g. patchstore object) return a memfilectx
2413 2412
2414 2413 This is a convenience method for building a memctx based on a patchstore.
2415 2414 """
2416 2415 def getfilectx(repo, memctx, path):
2417 2416 data, mode, copied = patchstore.getfile(path)
2418 2417 if data is None:
2419 2418 return None
2420 2419 islink, isexec = mode
2421 2420 return memfilectx(repo, memctx, path, data, islink=islink,
2422 2421 isexec=isexec, copied=copied)
2423 2422
2424 2423 return getfilectx
2425 2424
2426 2425 class memctx(committablectx):
2427 2426 """Use memctx to perform in-memory commits via localrepo.commitctx().
2428 2427
2429 2428 Revision information is supplied at initialization time, while the
2430 2429 related files' data is made available through a callback
2431 2430 mechanism. 'repo' is the current localrepo, 'parents' is a
2432 2431 sequence of two parent revision identifiers (pass None for every
2433 2432 missing parent), 'text' is the commit message and 'files' lists
2434 2433 names of files touched by the revision (normalized and relative to
2435 2434 repository root).
2436 2435
2437 2436 filectxfn(repo, memctx, path) is a callable receiving the
2438 2437 repository, the current memctx object and the normalized path of
2439 2438 requested file, relative to repository root. It is fired by the
2440 2439 commit function for every file in 'files', but the call order is
2441 2440 undefined. If the file is available in the revision being
2442 2441 committed (updated or added), filectxfn returns a memfilectx
2443 2442 object. If the file was removed, filectxfn returns None for recent
2444 2443 Mercurial. Moved files are represented by marking the source file
2445 2444 removed and the new file added with copy information (see
2446 2445 memfilectx).
2447 2446
2448 2447 user receives the committer name and defaults to current
2449 2448 repository username, date is the commit date in any format
2450 2449 supported by util.parsedate() and defaults to current date, extra
2451 2450 is a dictionary of metadata or is left empty.
2452 2451 """
2453 2452
2454 2453 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2455 2454 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2456 2455 # this field to determine what to do in filectxfn.
2457 2456 _returnnoneformissingfiles = True
2458 2457
2459 2458 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2460 2459 date=None, extra=None, branch=None, editor=False):
2461 2460 super(memctx, self).__init__(repo, text, user, date, extra)
2462 2461 self._rev = None
2463 2462 self._node = None
2464 2463 parents = [(p or nullid) for p in parents]
2465 2464 p1, p2 = parents
2466 2465 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2467 2466 files = sorted(set(files))
2468 2467 self._files = files
2469 2468 if branch is not None:
2470 2469 self._extra['branch'] = encoding.fromlocal(branch)
2471 2470 self.substate = {}
2472 2471
2473 2472 if isinstance(filectxfn, patch.filestore):
2474 2473 filectxfn = memfilefrompatch(filectxfn)
2475 2474 elif not callable(filectxfn):
2476 2475 # if store is not callable, wrap it in a function
2477 2476 filectxfn = memfilefromctx(filectxfn)
2478 2477
2479 2478 # memoizing increases performance for e.g. vcs convert scenarios.
2480 2479 self._filectxfn = makecachingfilectxfn(filectxfn)
2481 2480
2482 2481 if editor:
2483 2482 self._text = editor(self._repo, self, [])
2484 2483 self._repo.savecommitmessage(self._text)
2485 2484
2486 2485 def filectx(self, path, filelog=None):
2487 2486 """get a file context from the working directory
2488 2487
2489 2488 Returns None if file doesn't exist and should be removed."""
2490 2489 return self._filectxfn(self._repo, self, path)
2491 2490
2492 2491 def commit(self):
2493 2492 """commit context to the repo"""
2494 2493 return self._repo.commitctx(self)
2495 2494
2496 2495 @propertycache
2497 2496 def _manifest(self):
2498 2497 """generate a manifest based on the return values of filectxfn"""
2499 2498
2500 2499 # keep this simple for now; just worry about p1
2501 2500 pctx = self._parents[0]
2502 2501 man = pctx.manifest().copy()
2503 2502
2504 2503 for f in self._status.modified:
2505 2504 p1node = nullid
2506 2505 p2node = nullid
2507 2506 p = pctx[f].parents() # if file isn't in pctx, check p2?
2508 2507 if len(p) > 0:
2509 2508 p1node = p[0].filenode()
2510 2509 if len(p) > 1:
2511 2510 p2node = p[1].filenode()
2512 2511 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2513 2512
2514 2513 for f in self._status.added:
2515 2514 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2516 2515
2517 2516 for f in self._status.removed:
2518 2517 if f in man:
2519 2518 del man[f]
2520 2519
2521 2520 return man
2522 2521
2523 2522 @propertycache
2524 2523 def _status(self):
2525 2524 """Calculate exact status from ``files`` specified at construction
2526 2525 """
2527 2526 man1 = self.p1().manifest()
2528 2527 p2 = self._parents[1]
2529 2528 # "1 < len(self._parents)" can't be used for checking
2530 2529 # existence of the 2nd parent, because "memctx._parents" is
2531 2530 # explicitly initialized by the list, of which length is 2.
2532 2531 if p2.node() != nullid:
2533 2532 man2 = p2.manifest()
2534 2533 managing = lambda f: f in man1 or f in man2
2535 2534 else:
2536 2535 managing = lambda f: f in man1
2537 2536
2538 2537 modified, added, removed = [], [], []
2539 2538 for f in self._files:
2540 2539 if not managing(f):
2541 2540 added.append(f)
2542 2541 elif self[f]:
2543 2542 modified.append(f)
2544 2543 else:
2545 2544 removed.append(f)
2546 2545
2547 2546 return scmutil.status(modified, added, removed, [], [], [], [])
2548 2547
2549 2548 class memfilectx(committablefilectx):
2550 2549 """memfilectx represents an in-memory file to commit.
2551 2550
2552 2551 See memctx and committablefilectx for more details.
2553 2552 """
2554 2553 def __init__(self, repo, changectx, path, data, islink=False,
2555 2554 isexec=False, copied=None):
2556 2555 """
2557 2556 path is the normalized file path relative to repository root.
2558 2557 data is the file content as a string.
2559 2558 islink is True if the file is a symbolic link.
2560 2559 isexec is True if the file is executable.
2561 2560 copied is the source file path if current file was copied in the
2562 2561 revision being committed, or None."""
2563 2562 super(memfilectx, self).__init__(repo, path, None, changectx)
2564 2563 self._data = data
2565 2564 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2566 2565 self._copied = None
2567 2566 if copied:
2568 2567 self._copied = (copied, nullid)
2569 2568
2570 2569 def data(self):
2571 2570 return self._data
2572 2571
2573 2572 def remove(self, ignoremissing=False):
2574 2573 """wraps unlink for a repo's working directory"""
2575 2574 # need to figure out what to do here
2576 2575 del self._changectx[self._path]
2577 2576
2578 2577 def write(self, data, flags):
2579 2578 """wraps repo.wwrite"""
2580 2579 self._data = data
2581 2580
2582 2581 class overlayfilectx(committablefilectx):
2583 2582 """Like memfilectx but take an original filectx and optional parameters to
2584 2583 override parts of it. This is useful when fctx.data() is expensive (i.e.
2585 2584 flag processor is expensive) and raw data, flags, and filenode could be
2586 2585 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2587 2586 """
2588 2587
2589 2588 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2590 2589 copied=None, ctx=None):
2591 2590 """originalfctx: filecontext to duplicate
2592 2591
2593 2592 datafunc: None, or a function overriding data (file content); a function is
2594 2593 used so the data can be computed lazily. path, flags, copied, ctx: None or an overridden value
2595 2594
2596 2595 copied could be (path, rev), or False. copied could also be just path,
2597 2596 and will be converted to (path, nullid). This simplifies some callers.
2598 2597 """
2599 2598
2600 2599 if path is None:
2601 2600 path = originalfctx.path()
2602 2601 if ctx is None:
2603 2602 ctx = originalfctx.changectx()
2604 2603 ctxmatch = lambda: True
2605 2604 else:
2606 2605 ctxmatch = lambda: ctx == originalfctx.changectx()
2607 2606
2608 2607 repo = originalfctx.repo()
2609 2608 flog = originalfctx.filelog()
2610 2609 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2611 2610
2612 2611 if copied is None:
2613 2612 copied = originalfctx.renamed()
2614 2613 copiedmatch = lambda: True
2615 2614 else:
2616 2615 if copied and not isinstance(copied, tuple):
2617 2616 # repo._filecommit will recalculate copyrev so nullid is okay
2618 2617 copied = (copied, nullid)
2619 2618 copiedmatch = lambda: copied == originalfctx.renamed()
2620 2619
2621 2620 # When data, copied (could affect data), ctx (could affect filelog
2622 2621 # parents) are not overridden, rawdata, rawflags, and filenode may be
2623 2622 # reused (repo._filecommit should double check filelog parents).
2624 2623 #
2625 2624 # path, flags are not hashed in filelog (but in manifestlog) so they do
2626 2625 # not affect reusability here.
2627 2626 #
2628 2627 # If ctx or copied is overridden to the same value as originalfctx's,
2629 2628 # it is still considered reusable. originalfctx.renamed() may be a bit
2630 2629 # expensive so it's not called unless necessary. Assuming datafunc is
2631 2630 # always expensive, do not call it for this "reusable" test.
2632 2631 reusable = datafunc is None and ctxmatch() and copiedmatch()
2633 2632
2634 2633 if datafunc is None:
2635 2634 datafunc = originalfctx.data
2636 2635 if flags is None:
2637 2636 flags = originalfctx.flags()
2638 2637
2639 2638 self._datafunc = datafunc
2640 2639 self._flags = flags
2641 2640 self._copied = copied
2642 2641
2643 2642 if reusable:
2644 2643 # copy extra fields from originalfctx
2645 2644 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2646 2645 for attr_ in attrs:
2647 2646 if util.safehasattr(originalfctx, attr_):
2648 2647 setattr(self, attr_, getattr(originalfctx, attr_))
2649 2648
2650 2649 def data(self):
2651 2650 return self._datafunc()
2652 2651
2653 2652 class metadataonlyctx(committablectx):
2654 2653 """Like memctx but it's reusing the manifest of different commit.
2655 2654 Intended to be used by lightweight operations that are creating
2656 2655 metadata-only changes.
2657 2656
2658 2657 Revision information is supplied at initialization time. 'repo' is the
2659 2658 current localrepo, 'ctx' is the original revision whose manifest we're
2660 2659 reusing, 'parents' is a sequence of two parent revision identifiers (pass
2661 2660 None for every missing parent), 'text' is the commit message.
2662 2661
2663 2662 user receives the committer name and defaults to current repository
2664 2663 username, date is the commit date in any format supported by
2665 2664 util.parsedate() and defaults to current date, extra is a dictionary of
2666 2665 metadata or is left empty.
2667 2666 """
2668 2667 def __new__(cls, repo, originalctx, *args, **kwargs):
2669 2668 return super(metadataonlyctx, cls).__new__(cls, repo)
2670 2669
2671 2670 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2672 2671 date=None, extra=None, editor=False):
2673 2672 if text is None:
2674 2673 text = originalctx.description()
2675 2674 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2676 2675 self._rev = None
2677 2676 self._node = None
2678 2677 self._originalctx = originalctx
2679 2678 self._manifestnode = originalctx.manifestnode()
2680 2679 if parents is None:
2681 2680 parents = originalctx.parents()
2682 2681 else:
2683 2682 parents = [repo[p] for p in parents if p is not None]
2684 2683 parents = parents[:]
2685 2684 while len(parents) < 2:
2686 2685 parents.append(repo[nullid])
2687 2686 p1, p2 = self._parents = parents
2688 2687
2689 2688 # sanity check to ensure that the reused manifest parents are
2690 2689 # manifests of our commit parents
2691 2690 mp1, mp2 = self.manifestctx().parents
2692 2691 if p1 != nullid and p1.manifestnode() != mp1:
2693 2692 raise RuntimeError('can\'t reuse the manifest: '
2694 2693 'its p1 doesn\'t match the new ctx p1')
2695 2694 if p2 != nullid and p2.manifestnode() != mp2:
2696 2695 raise RuntimeError('can\'t reuse the manifest: '
2697 2696 'its p2 doesn\'t match the new ctx p2')
2698 2697
2699 2698 self._files = originalctx.files()
2700 2699 self.substate = {}
2701 2700
2702 2701 if editor:
2703 2702 self._text = editor(self._repo, self, [])
2704 2703 self._repo.savecommitmessage(self._text)
2705 2704
2706 2705 def manifestnode(self):
2707 2706 return self._manifestnode
2708 2707
2709 2708 @property
2710 2709 def _manifestctx(self):
2711 2710 return self._repo.manifestlog[self._manifestnode]
2712 2711
2713 2712 def filectx(self, path, filelog=None):
2714 2713 return self._originalctx.filectx(path, filelog=filelog)
2715 2714
2716 2715 def commit(self):
2717 2716 """commit context to the repo"""
2718 2717 return self._repo.commitctx(self)
2719 2718
2720 2719 @property
2721 2720 def _manifest(self):
2722 2721 return self._originalctx.manifest()
2723 2722
2724 2723 @propertycache
2725 2724 def _status(self):
2726 2725 """Calculate exact status from ``files`` specified in the ``origctx``
2727 2726 and parents manifests.
2728 2727 """
2729 2728 man1 = self.p1().manifest()
2730 2729 p2 = self._parents[1]
2731 2730 # "1 < len(self._parents)" can't be used for checking
2732 2731 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2733 2732 # explicitly initialized by the list, of which length is 2.
2734 2733 if p2.node() != nullid:
2735 2734 man2 = p2.manifest()
2736 2735 managing = lambda f: f in man1 or f in man2
2737 2736 else:
2738 2737 managing = lambda f: f in man1
2739 2738
2740 2739 modified, added, removed = [], [], []
2741 2740 for f in self._files:
2742 2741 if not managing(f):
2743 2742 added.append(f)
2744 2743 elif f in self:
2745 2744 modified.append(f)
2746 2745 else:
2747 2746 removed.append(f)
2748 2747
2749 2748 return scmutil.status(modified, added, removed, [], [], [], [])
2750 2749
2751 2750 class arbitraryfilectx(object):
2752 2751 """Allows you to use filectx-like functions on a file in an arbitrary
2753 2752 location on disk, possibly not in the working directory.
2754 2753 """
2755 2754 def __init__(self, path, repo=None):
2756 2755 # Repo is optional because contrib/simplemerge uses this class.
2757 2756 self._repo = repo
2758 2757 self._path = path
2759 2758
2760 2759 def cmp(self, fctx):
2761 2760 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2762 2761 # path if either side is a symlink.
2763 2762 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2764 2763 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2765 2764 # Add a fast-path for merge if both sides are disk-backed.
2766 2765 # Note that filecmp uses the opposite return values (True if same)
2767 2766 # from our cmp functions (True if different).
2768 2767 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2769 2768 return self.data() != fctx.data()
2770 2769
2771 2770 def path(self):
2772 2771 return self._path
2773 2772
2774 2773 def flags(self):
2775 2774 return ''
2776 2775
2777 2776 def data(self):
2778 2777 return util.readfile(self._path)
2779 2778
2780 2779 def decodeddata(self):
2781 2780 with open(self._path, "rb") as f:
2782 2781 return f.read()
2783 2782
2784 2783 def remove(self):
2785 2784 util.unlink(self._path)
2786 2785
2787 2786 def write(self, data, flags):
2788 2787 assert not flags
2789 2788 with open(self._path, "w") as f:
2790 2789 f.write(data)
@@ -1,897 +1,906 b''
1 1 # obsutil.py - utility functions for obsolescence
2 2 #
3 3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 node as nodemod,
15 15 phases,
16 16 util,
17 17 )
18 18
19 19 class marker(object):
20 20 """Wrap obsolete marker raw data"""
21 21
22 22 def __init__(self, repo, data):
23 23 # the repo argument will be used to create changectx in later version
24 24 self._repo = repo
25 25 self._data = data
26 26 self._decodedmeta = None
27 27
28 28 def __hash__(self):
29 29 return hash(self._data)
30 30
31 31 def __eq__(self, other):
32 32 if type(other) != type(self):
33 33 return False
34 34 return self._data == other._data
35 35
36 36 def precnode(self):
37 37 msg = ("'marker.precnode' is deprecated, "
38 38 "use 'marker.prednode'")
39 39 util.nouideprecwarn(msg, '4.4')
40 40 return self.prednode()
41 41
42 42 def prednode(self):
43 43 """Predecessor changeset node identifier"""
44 44 return self._data[0]
45 45
46 46 def succnodes(self):
47 47 """List of successor changesets node identifiers"""
48 48 return self._data[1]
49 49
50 50 def parentnodes(self):
51 51 """Parents of the predecessors (None if not recorded)"""
52 52 return self._data[5]
53 53
54 54 def metadata(self):
55 55 """Decoded metadata dictionary"""
56 56 return dict(self._data[3])
57 57
58 58 def date(self):
59 59 """Creation date as (unixtime, offset)"""
60 60 return self._data[4]
61 61
62 62 def flags(self):
63 63 """The flags field of the marker"""
64 64 return self._data[2]
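
def _examplemarkerfields(repo, data):
    # Hedged sketch (not part of the original module): how the accessors
    # above map onto the raw marker tuple.
    m = marker(repo, data)
    return {
        'prednode': m.prednode(),     # data[0]
        'succnodes': m.succnodes(),   # data[1]
        'flags': m.flags(),           # data[2]
        'metadata': m.metadata(),     # dict(data[3])
        'date': m.date(),             # data[4]
        'parents': m.parentnodes(),   # data[5]
    }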
65 65
66 66 def getmarkers(repo, nodes=None, exclusive=False):
67 67 """returns markers known in a repository
68 68
69 69 If <nodes> is specified, only markers "relevant" to those nodes are
70 70 returned"""
71 71 if nodes is None:
72 72 rawmarkers = repo.obsstore
73 73 elif exclusive:
74 74 rawmarkers = exclusivemarkers(repo, nodes)
75 75 else:
76 76 rawmarkers = repo.obsstore.relevantmarkers(nodes)
77 77
78 78 for markerdata in rawmarkers:
79 79 yield marker(repo, markerdata)
80 80
81 81 def closestpredecessors(repo, nodeid):
82 82 """yield the list of next predecessors pointing on visible changectx nodes
83 83
84 84 This function respects the repoview filtering; filtered revisions will be
85 85 considered missing.
86 86 """
87 87
88 88 precursors = repo.obsstore.predecessors
89 89 stack = [nodeid]
90 90 seen = set(stack)
91 91
92 92 while stack:
93 93 current = stack.pop()
94 94 currentpreccs = precursors.get(current, ())
95 95
96 96 for prec in currentpreccs:
97 97 precnodeid = prec[0]
98 98
99 99 # Basic cycle protection
100 100 if precnodeid in seen:
101 101 continue
102 102 seen.add(precnodeid)
103 103
104 104 if precnodeid in repo:
105 105 yield precnodeid
106 106 else:
107 107 stack.append(precnodeid)
108 108
109 109 def allprecursors(*args, **kwargs):
110 110 """ (DEPRECATED)
111 111 """
112 112 msg = ("'obsutil.allprecursors' is deprecated, "
113 113 "use 'obsutil.allpredecessors'")
114 114 util.nouideprecwarn(msg, '4.4')
115 115
116 116 return allpredecessors(*args, **kwargs)
117 117
118 118 def allpredecessors(obsstore, nodes, ignoreflags=0):
119 119 """Yield node for every precursors of <nodes>.
120 120
121 121 Some precursors may be unknown locally.
122 122
123 123 This is a linear yield unsuited to detecting folded changesets. It includes
124 124 initial nodes too."""
125 125
126 126 remaining = set(nodes)
127 127 seen = set(remaining)
128 128 while remaining:
129 129 current = remaining.pop()
130 130 yield current
131 131 for mark in obsstore.predecessors.get(current, ()):
132 132 # ignore marker flagged with specified flag
133 133 if mark[2] & ignoreflags:
134 134 continue
135 135 suc = mark[0]
136 136 if suc not in seen:
137 137 seen.add(suc)
138 138 remaining.add(suc)
139 139
140 140 def allsuccessors(obsstore, nodes, ignoreflags=0):
141 141 """Yield node for every successor of <nodes>.
142 142
143 143 Some successors may be unknown locally.
144 144
145 145 This is a linear yield unsuited to detecting split changesets. It includes
146 146 initial nodes too."""
147 147 remaining = set(nodes)
148 148 seen = set(remaining)
149 149 while remaining:
150 150 current = remaining.pop()
151 151 yield current
152 152 for mark in obsstore.successors.get(current, ()):
153 153 # ignore marker flagged with specified flag
154 154 if mark[2] & ignoreflags:
155 155 continue
156 156 for suc in mark[1]:
157 157 if suc not in seen:
158 158 seen.add(suc)
159 159 remaining.add(suc)
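
def _examplesuccessors(obsstore, node):
    # Hedged sketch (not part of the original module): every transitive
    # successor of ``node``, excluding the node itself.
    return [n for n in allsuccessors(obsstore, [node]) if n != node]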
160 160
161 161 def _filterprunes(markers):
162 162 """return a set with no prune markers"""
163 163 return set(m for m in markers if m[1])
164 164
165 165 def exclusivemarkers(repo, nodes):
166 166 """set of markers relevant to "nodes" but no other locally-known nodes
167 167
168 168 This function compute the set of markers "exclusive" to a locally-known
169 169 node. This means we walk the markers starting from <nodes> until we reach a
170 170 locally-known precursors outside of <nodes>. Element of <nodes> with
171 171 locally-known successors outside of <nodes> are ignored (since their
172 172 precursors markers are also relevant to these successors).
173 173
174 174 For example:
175 175
176 176 # (A0 rewritten as A1)
177 177 #
178 178 # A0 <-1- A1 # Marker "1" is exclusive to A1
179 179
180 180 or
181 181
182 182 # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
183 183 #
184 184 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
185 185
186 186 or
187 187
188 188 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
189 189 #
190 190 # <-2- A1 # Marker "2" is exclusive to A0,A1
191 191 # /
192 192 # <-1- A0
193 193 # \
194 194 # <-3- A2 # Marker "3" is exclusive to A0,A2
195 195 #
196 196 # in addition:
197 197 #
198 198 # Markers "2,3" are exclusive to A1,A2
199 199 # Markers "1,2,3" are exclusive to A0,A1,A2
200 200
201 201 See test/test-obsolete-bundle-strip.t for more examples.
202 202
203 203 An example usage is strip. When stripping a changeset, we also want to
204 204 strip the markers exclusive to this changeset. Otherwise we would have
205 205 "dangling"" obsolescence markers from its precursors: Obsolescence markers
206 206 marking a node as obsolete without any successors available locally.
207 207
208 208 As for relevant markers, the prune markers for children will be followed.
209 209 Of course, they will only be followed if the pruned child is
210 210 locally-known, since the prune markers are relevant to the pruned node.
211 211 However, while prune markers are considered relevant to the parent of the
212 212 pruned changesets, prune markers for locally-known changesets (with no
213 213 successors) are considered exclusive to the pruned nodes. This allows
214 214 stripping the prune markers (with the rest of the exclusive chain) alongside
215 215 the pruned changesets.
216 216 """
217 217 # running on a filtered repository would be dangerous as markers could be
218 218 # reported as exclusive when they are relevant for other filtered nodes.
219 219 unfi = repo.unfiltered()
220 220
221 221 # shortcut to various useful items
222 222 nm = unfi.changelog.nodemap
223 223 precursorsmarkers = unfi.obsstore.predecessors
224 224 successormarkers = unfi.obsstore.successors
225 225 childrenmarkers = unfi.obsstore.children
226 226
227 227 # exclusive markers (return of the function)
228 228 exclmarkers = set()
229 229 # we need fast membership testing
230 230 nodes = set(nodes)
231 231 # looking for head in the obshistory
232 232 #
233 233 # XXX we are ignoring all issues with regard to cycles for now.
234 234 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
235 235 stack.sort()
236 236 # nodes already stacked
237 237 seennodes = set(stack)
238 238 while stack:
239 239 current = stack.pop()
240 240 # fetch precursors markers
241 241 markers = list(precursorsmarkers.get(current, ()))
242 242 # extend the list with prune markers
243 243 for mark in successormarkers.get(current, ()):
244 244 if not mark[1]:
245 245 markers.append(mark)
246 246 # and markers from children (looking for prune)
247 247 for mark in childrenmarkers.get(current, ()):
248 248 if not mark[1]:
249 249 markers.append(mark)
250 250 # traverse the markers
251 251 for mark in markers:
252 252 if mark in exclmarkers:
253 253 # markers already selected
254 254 continue
255 255
256 256 # If the marker is about the current node, select it
257 257 #
258 258 # (this delays the addition of markers from children)
259 259 if mark[1] or mark[0] == current:
260 260 exclmarkers.add(mark)
261 261
262 262 # should we keep traversing through the precursors?
263 263 prec = mark[0]
264 264
265 265 # nodes in the stack or already processed
266 266 if prec in seennodes:
267 267 continue
268 268
269 269 # is this a locally known node ?
270 270 known = prec in nm
271 271 # if locally-known and not in the <nodes> set the traversal
272 272 # stop here.
273 273 if known and prec not in nodes:
274 274 continue
275 275
276 276 # do not keep going if there are unselected markers pointing to this
277 277 # node. If we end up traversing these unselected markers later the
278 278 # node will be taken care of at that point.
279 279 precmarkers = _filterprunes(successormarkers.get(prec))
280 280 if precmarkers.issubset(exclmarkers):
281 281 seennodes.add(prec)
282 282 stack.append(prec)
283 283
284 284 return exclmarkers
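
def _exampleexclusive(repo, nodes):
    # Hedged sketch (not part of the original module): this is the raw
    # set that ``getmarkers(repo, nodes, exclusive=True)`` above wraps
    # into ``marker`` objects before yielding.
    return [marker(repo, m) for m in exclusivemarkers(repo, nodes)]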
285 285
286 286 def foreground(repo, nodes):
287 287 """return all nodes in the "foreground" of other node
288 288
289 289 The foreground of a revision is anything reachable using parent -> children
290 290 or precursor -> successor relations. It is very similar to "descendants" but
291 291 augmented with obsolescence information.
292 292
293 293 Beware that obsolescence cycles may appear in complex situations.
294 294 """
295 295 repo = repo.unfiltered()
296 296 foreground = set(repo.set('%ln::', nodes))
297 297 if repo.obsstore:
298 298 # We only need this complicated logic if there is obsolescence
299 299 # XXX will probably deserve an optimised revset.
300 300 nm = repo.changelog.nodemap
301 301 plen = -1
302 302 # compute the whole set of successors or descendants
303 303 while len(foreground) != plen:
304 304 plen = len(foreground)
305 305 succs = set(c.node() for c in foreground)
306 306 mutable = [c.node() for c in foreground if c.mutable()]
307 307 succs.update(allsuccessors(repo.obsstore, mutable))
308 308 known = (n for n in succs if n in nm)
309 309 foreground = set(repo.set('%ln::', known))
310 310 return set(c.node() for c in foreground)
311 311
312 312 # effectflag field
313 313 #
314 314 # Effect-flag is a 1-byte bit field used to store what changed between a
315 315 # changeset and its successor(s).
316 316 #
317 317 # The effect flag is stored in obs-markers metadata while we iterate on the
318 318 # information design. That's why we have the EFFECTFLAGFIELD. If we come up
319 319 # with an incompatible design for effect flag, we can store a new design under
320 320 # another field name so we don't break readers. We plan to extend the existing
321 321 # obsmarkers bit-field once the effect flag design is stabilized.
322 322 #
323 323 # The effect-flag is placed behind an experimental flag
324 324 # `effect-flags` set to off by default.
325 325 #
326 326
327 327 EFFECTFLAGFIELD = "ef1"
328 328
329 329 DESCCHANGED = 1 << 0 # action changed the description
330 330 METACHANGED = 1 << 1 # action changed the meta
331 331 DIFFCHANGED = 1 << 3 # action changed the diff introduced by the changeset
332 332 PARENTCHANGED = 1 << 2 # action changed the parent
333 333 USERCHANGED = 1 << 4 # the user changed
334 334 DATECHANGED = 1 << 5 # the date changed
335 335 BRANCHCHANGED = 1 << 6 # the branch changed
336 336
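# Worked example (illustrative): an amend editing both the description and
# the diff would combine the bits as
#
#   effects = DESCCHANGED | DIFFCHANGED   # (1 << 0) | (1 << 3) == 9
#
# and a reader can test an individual bit with `effects & DESCCHANGED`.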
337 337 METABLACKLIST = [
338 338 re.compile('^branch$'),
339 339 re.compile('^.*-source$'),
340 340 re.compile('^.*_source$'),
341 341 re.compile('^source$'),
342 342 ]
343 343
344 344 def metanotblacklisted(metaitem):
345 345 """ Check that the key of a meta item (extrakey, extravalue) does not
346 346 match at least one of the blacklist pattern
347 347 """
348 348 metakey = metaitem[0]
349 349
350 350 return not any(pattern.match(metakey) for pattern in METABLACKLIST)
351 351
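# For instance (illustrative keys): ('rebase_source', ...) matches the
# '^.*_source$' pattern, so metanotblacklisted() returns False for it,
# while an unrelated key such as ('close', ...) yields True.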
352 352 def _prepare_hunk(hunk):
353 353 """Drop all information but the username and patch"""
354 354 cleanhunk = []
355 355 for line in hunk.splitlines():
356 356 if line.startswith(b'# User') or not line.startswith(b'#'):
357 357 if line.startswith(b'@@'):
358 358 line = b'@@\n'
359 359 cleanhunk.append(line)
360 360 return cleanhunk
361 361
362 362 def _getdifflines(iterdiff):
363 363 """return a cleaned up lines"""
364 364 lines = next(iterdiff, None)
365 365
366 366 if lines is None:
367 367 return lines
368 368
369 369 return _prepare_hunk(lines)
370 370
371 371 def _cmpdiff(leftctx, rightctx):
372 372 """return True if both ctx introduce the "same diff"
373 373
374 374 This is a first and basic implementation, with many shortcomings.
375 375 """
376 376
377 377 # leftctx or rightctx might be filtered, so we need to use the contexts
378 378 # with an unfiltered repository to safely compute the diff
379 379 leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
380 380 leftdiff = leftunfi.diff(git=1)
381 381 rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
382 382 rightdiff = rightunfi.diff(git=1)
383 383
384 384 left, right = (0, 0)
385 385 while None not in (left, right):
386 386 left = _getdifflines(leftdiff)
387 387 right = _getdifflines(rightdiff)
388 388
389 389 if left != right:
390 390 return False
391 391 return True
392 392
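# Usage sketch (assuming `oldctx` and `newctx` are changectx objects of
# the same repository):
#
#   if not _cmpdiff(oldctx, newctx):
#       pass  # the rewrite changed the patch content itself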
393 393 def geteffectflag(relation):
394 394 """ From an obs-marker relation, compute what changed between the
395 395 predecessor and the successor.
396 396 """
397 397 effects = 0
398 398
399 399 source = relation[0]
400 400
401 401 for changectx in relation[1]:
402 402 # Check if description has changed
403 403 if changectx.description() != source.description():
404 404 effects |= DESCCHANGED
405 405
406 406 # Check if user has changed
407 407 if changectx.user() != source.user():
408 408 effects |= USERCHANGED
409 409
410 410 # Check if date has changed
411 411 if changectx.date() != source.date():
412 412 effects |= DATECHANGED
413 413
414 414 # Check if branch has changed
415 415 if changectx.branch() != source.branch():
416 416 effects |= BRANCHCHANGED
417 417
418 418 # Check if at least one of the parents has changed
419 419 if changectx.parents() != source.parents():
420 420 effects |= PARENTCHANGED
421 421
422 422 # Check if other meta has changed
423 423 changeextra = changectx.extra().items()
424 424 ctxmeta = filter(metanotblacklisted, changeextra)
425 425
426 426 sourceextra = source.extra().items()
427 427 srcmeta = filter(metanotblacklisted, sourceextra)
428 428
429 429 if ctxmeta != srcmeta:
430 430 effects |= METACHANGED
431 431
432 432 # Check if the diff has changed
433 433 if not _cmpdiff(source, changectx):
434 434 effects |= DIFFCHANGED
435 435
436 436 return effects
437 437
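# Call sketch (hypothetical contexts): the relation mirrors an obsmarker,
# pairing one predecessor with a tuple of successors:
#
#   effects = geteffectflag((oldctx, (newctx,)))
#   if effects & USERCHANGED:
#       pass  # the rewrite changed the commit author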
438 438 def getobsoleted(repo, tr):
439 439 """return the set of pre-existing revisions obsoleted by a transaction"""
440 440 torev = repo.unfiltered().changelog.nodemap.get
441 441 phase = repo._phasecache.phase
442 442 succsmarkers = repo.obsstore.successors.get
443 443 public = phases.public
444 444 addedmarkers = tr.changes.get('obsmarkers')
445 445 addedrevs = tr.changes.get('revs')
446 446 seenrevs = set()
447 447 obsoleted = set()
448 448 for mark in addedmarkers:
449 449 node = mark[0]
450 450 rev = torev(node)
451 451 if rev is None or rev in seenrevs or rev in addedrevs:
452 452 continue
453 453 seenrevs.add(rev)
454 454 if phase(repo, rev) == public:
455 455 continue
456 456 if set(succsmarkers(node) or []).issubset(addedmarkers):
457 457 obsoleted.add(rev)
458 458 return obsoleted
459 459
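# Consumer sketch (assuming `tr` is the live transaction object): report
# what the transaction just obsoleted:
#
#   obsrevs = getobsoleted(repo, tr)
#   if obsrevs:
#       repo.ui.status('%d changesets obsoleted\n' % len(obsrevs))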
460 460 class _succs(list):
461 461 """small class to represent a successors with some metadata about it"""
462 462
463 463 def __init__(self, *args, **kwargs):
464 464 super(_succs, self).__init__(*args, **kwargs)
465 465 self.markers = set()
466 466
467 467 def copy(self):
468 468 new = _succs(self)
469 469 new.markers = self.markers.copy()
470 470 return new
471 471
472 472 @util.propertycache
473 473 def _set(self):
474 474 # immutable
475 475 return set(self)
476 476
477 477 def canmerge(self, other):
478 478 return self._set.issubset(other._set)
479 479
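# Illustration of _succs.canmerge (hypothetical node names):
#
#   a = _succs(('n1',))
#   b = _succs(('n1', 'n2'))
#   assert a.canmerge(b)       # {'n1'} is a subset of {'n1', 'n2'}
#   assert not b.canmerge(a)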
480 480 def successorssets(repo, initialnode, closest=False, cache=None):
481 481 """Return set of all latest successors of initial nodes
482 482
483 483 The successors set of a changeset A are the group of revisions that succeed
484 484 A. It succeeds A as a consistent whole, each revision being only a partial
485 485 replacement. By default, the successors set contains non-obsolete
486 486 changesets only, walking the obsolescence graph until reaching a leaf. If
487 487 'closest' is set to True, the closest successors-sets are returned (the
488 488 obsolescence walk stops on known changesets).
489 489
490 490 This function returns the full list of successor sets which is why it
491 491 returns a list of tuples and not just a single tuple. Each tuple is a valid
492 492 successors set. Note that (A,) may be a valid successors set for changeset A
493 493 (see below).
494 494
495 495 In most cases, a changeset A will have a single element (e.g. the changeset
496 496 A is replaced by A') in its successors set. Though, it is also common for a
497 497 changeset A to have no elements in its successor set (e.g. the changeset
498 498 has been pruned). Therefore, the returned list of successors sets will be
499 499 [(A',)] or [], respectively.
500 500
501 501 When a changeset A is split into A' and B', however, it will result in a
502 502 successors set containing more than a single element, i.e. [(A',B')].
503 503 Divergent changesets will result in multiple successors sets, i.e. [(A',),
504 504 (A'')].
505 505
506 506 If a changeset A is not obsolete, then it will conceptually have no
507 507 successors set. To distinguish this from a pruned changeset, the successor
508 508 set will contain itself only, i.e. [(A,)].
509 509
510 510 Finally, final successors unknown locally are considered to be pruned
511 511 (pruned: obsoleted without any successors; final: successors not affected
512 512 by any further markers).
513 513
514 514 The 'closest' mode respects the repoview filtering. For example, without
515 515 any filter it will stop at the first locally known changeset; with the
516 516 'visible' filter it will stop on visible changesets.
517 517
518 518 The optional `cache` parameter is a dictionary that may contain
519 519 precomputed successors sets. It is meant to reuse the computation of a
520 520 previous call to `successorssets` when multiple calls are made at the same
521 521 time. The cache dictionary is updated in place. The caller is responsible
522 522 for its life span. Code that makes multiple calls to `successorssets`
523 523 *should* use this cache mechanism or risk a performance hit.
524 524
525 525 Since results differ depending on the 'closest' mode, the same cache
526 526 cannot be reused for both modes.
527 527 """
528 528
529 529 succmarkers = repo.obsstore.successors
530 530
531 531 # Stack of nodes we search successors sets for
532 532 toproceed = [initialnode]
533 533 # set version of above list for fast loop detection
534 534 # element added to "toproceed" must be added here
535 535 stackedset = set(toproceed)
536 536 if cache is None:
537 537 cache = {}
538 538
539 539 # This while loop is the flattened version of a recursive search for
540 540 # successors sets
541 541 #
542 542 # def successorssets(x):
543 543 # successors = directsuccessors(x)
544 544 # ss = [[]]
545 545 # for succ in successors:
546 546 # # product as in itertools cartesian product
547 547 # ss = product(ss, successorssets(succ))
548 548 # return ss
549 549 #
550 550 # But we cannot use plain recursive calls here:
551 551 # - that would blow the python call stack
552 552 # - obsolescence markers may have cycles, we need to handle them.
553 553 #
554 554 # The `toproceed` list acts as our call stack. Every node we search
555 555 # successors sets for is stacked there.
556 556 #
557 557 # The `stackedset` is the set version of this stack, used to check if a
558 558 # node is already stacked. This check is used to detect cycles and prevent
559 559 # infinite loops.
560 560 #
561 561 # successors sets of all nodes are stored in the `cache` dictionary.
562 562 #
563 563 # After this while loop ends we use the cache to return the successors sets
564 564 # for the node requested by the caller.
565 565 while toproceed:
566 566 # Every iteration tries to compute the successors sets of the topmost
567 567 # node of the stack: CURRENT.
568 568 #
569 569 # There are four possible outcomes:
570 570 #
571 571 # 1) We already know the successors sets of CURRENT:
572 572 # -> mission accomplished, pop it from the stack.
573 573 # 2) Stop the walk:
574 574 # default case: Node is not obsolete
575 575 # closest case: Node is known at this repo filter level
576 576 # -> the node is its own successors set. Add it to the cache.
577 577 # 3) We do not know successors set of direct successors of CURRENT:
578 578 # -> We add those successors to the stack.
579 579 # 4) We know successors sets of all direct successors of CURRENT:
580 580 # -> We can compute CURRENT successors set and add it to the
581 581 # cache.
582 582 #
583 583 current = toproceed[-1]
584 584
585 585 # case 2 condition is a bit hairy because of closest,
586 586 # we compute it on its own
587 587 case2condition = ((current not in succmarkers)
588 588 or (closest and current != initialnode
589 589 and current in repo))
590 590
591 591 if current in cache:
592 592 # case (1): We already know the successors sets
593 593 stackedset.remove(toproceed.pop())
594 594 elif case2condition:
595 595 # case (2): end of walk.
596 596 if current in repo:
597 597 # We have a valid successors set.
598 598 cache[current] = [_succs((current,))]
599 599 else:
600 600 # Final obsolete version is unknown locally.
601 601 # Do not count that as a valid successors set.
602 602 cache[current] = []
603 603 else:
604 604 # cases (3) and (4)
605 605 #
606 606 # We proceed in two phases. Phase 1 aims to distinguish case (3)
607 607 # from case (4):
608 608 #
609 609 # For each direct successor of CURRENT, we check whether its
610 610 # successors sets are known. If they are not, we stack the
611 611 # unknown node and proceed to the next iteration of the while
612 612 # loop. (case 3)
613 613 #
614 614 # During this step, we may detect obsolescence cycles: a node
615 615 # with unknown successors sets but already in the call stack.
616 616 # In such a situation, we arbitrarily set the successors sets of
617 617 # the node to nothing (node pruned) to break the cycle.
618 618 #
619 619 # If no break was encountered we proceed to phase 2.
620 620 #
621 621 # Phase 2 computes successors sets of CURRENT (case 4); see details
622 622 # in phase 2 itself.
623 623 #
624 624 # Note the two levels of iteration in each phase.
625 625 # - The first one handles obsolescence markers using CURRENT as
626 626 # precursor (successors markers of CURRENT).
627 627 #
628 628 # Having multiple entry here means divergence.
629 629 #
630 630 # - The second one handles successors defined in each marker.
631 631 #
632 632 # Having none means pruned node, multiple successors means split,
633 633 # single successors are standard replacement.
634 634 #
635 635 for mark in sorted(succmarkers[current]):
636 636 for suc in mark[1]:
637 637 if suc not in cache:
638 638 if suc in stackedset:
639 639 # cycle breaking
640 640 cache[suc] = []
641 641 else:
642 642 # case (3) If we have not computed successors sets
643 643 # of one of those successors we add it to the
644 644 # `toproceed` stack and stop all work for this
645 645 # iteration.
646 646 toproceed.append(suc)
647 647 stackedset.add(suc)
648 648 break
649 649 else:
650 650 continue
651 651 break
652 652 else:
653 653 # case (4): we know all successors sets of all direct
654 654 # successors
655 655 #
656 656 # Successors set contributed by each marker depends on the
657 657 # successors sets of all its "successors" nodes.
658 658 #
659 659 # Each different marker is a divergence in the obsolescence
660 660 # history. It contributes successors sets distinct from other
661 661 # markers.
662 662 #
663 663 # Within a marker, a successor may have divergent successors
664 664 # sets. In such a case, the marker will contribute multiple
665 665 # divergent successors sets. If multiple successors have
666 666 # divergent successors sets, a Cartesian product is used.
667 667 #
668 668 # At the end we post-process successors sets to remove
669 669 # duplicated entries and successors sets that are strict subsets of
670 670 # another one.
671 671 succssets = []
672 672 for mark in sorted(succmarkers[current]):
673 673 # successors sets contributed by this marker
674 674 base = _succs()
675 675 base.markers.add(mark)
676 676 markss = [base]
677 677 for suc in mark[1]:
678 678 # cartesian product with previous successors
679 679 productresult = []
680 680 for prefix in markss:
681 681 for suffix in cache[suc]:
682 682 newss = prefix.copy()
683 683 newss.markers.update(suffix.markers)
684 684 for part in suffix:
685 685 # do not duplicate entries in the successors set;
686 686 # the first entry wins.
687 687 if part not in newss:
688 688 newss.append(part)
689 689 productresult.append(newss)
690 690 markss = productresult
691 691 succssets.extend(markss)
692 692 # remove duplicates and subsets
693 693 seen = []
694 694 final = []
695 695 candidates = sorted((s for s in succssets if s),
696 696 key=len, reverse=True)
697 697 for cand in candidates:
698 698 for seensuccs in seen:
699 699 if cand.canmerge(seensuccs):
700 700 seensuccs.markers.update(cand.markers)
701 701 break
702 702 else:
703 703 final.append(cand)
704 704 seen.append(cand)
705 705 final.reverse() # put small successors sets first
706 706 cache[current] = final
707 707 return cache[initialnode]
708 708
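# Usage sketch: share one cache dict across calls, as the docstring
# recommends, when resolving many nodes (`nodes` assumed iterable):
#
#   cache = {}
#   for node in nodes:
#       for sset in successorssets(repo, node, cache=cache):
#           pass  # each `sset` is a _succs list of successor nodes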
709 709 def successorsandmarkers(repo, ctx):
710 710 """compute the raw data needed for computing obsfate
711 711 Returns a list of dicts, one dict per successors set
712 712 """
713 713 if not ctx.obsolete():
714 714 return None
715 715
716 716 ssets = successorssets(repo, ctx.node(), closest=True)
717 717
718 718 # successorssets(closest=True) returns an empty list for pruned revisions; remap it
719 719 # into a list containing an empty list for future processing
720 720 if ssets == []:
721 721 ssets = [[]]
722 722
723 723 # Try to recover pruned markers
724 724 succsmap = repo.obsstore.successors
725 725 fullsuccessorsets = [] # successor set + markers
726 726 for sset in ssets:
727 727 if sset:
728 728 fullsuccessorsets.append(sset)
729 729 else:
730 730 # successorssets returns an empty list when ctx or one of its
731 731 # successors is pruned.
732 732 # In this case, walk the obs-marker tree again starting with ctx
733 733 # and find the relevant pruning obs-markers, the ones without
734 734 # successors.
735 735 # Having these markers allows us to compute some information about
736 736 # its fate, like who pruned this changeset and when.
737 737
738 738 # XXX we do not catch all prune markers (eg rewritten then pruned)
739 739 # (fix me later)
740 740 foundany = False
741 741 for mark in succsmap.get(ctx.node(), ()):
742 742 if not mark[1]:
743 743 foundany = True
744 744 sset = _succs()
745 745 sset.markers.add(mark)
746 746 fullsuccessorsets.append(sset)
747 747 if not foundany:
748 748 fullsuccessorsets.append(_succs())
749 749
750 750 values = []
751 751 for sset in fullsuccessorsets:
752 752 values.append({'successors': sset, 'markers': sset.markers})
753 753
754 754 return values
755 755
756 756 def _getobsfate(successorssets):
757 757 """ Compute a changeset obsolescence fate based on its successorssets.
758 758 Successors can be the tipmost ones or the immediate ones. This function's
759 759 return values are not meant to be shown directly to users; they are meant
760 760 to be used by internal functions only.
761 761 Returns one fate from the following values:
762 762 - pruned
763 763 - diverged
764 764 - superseded
765 765 - superseded_split
766 766 """
767 767
768 768 if len(successorssets) == 0:
769 769 # The commit has been pruned
770 770 return 'pruned'
771 771 elif len(successorssets) > 1:
772 772 return 'diverged'
773 773 else:
774 774 # No divergence, only one set of successors
775 775 successors = successorssets[0]
776 776
777 777 if len(successors) == 1:
778 778 return 'superseded'
779 779 else:
780 780 return 'superseded_split'
781 781
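# The shape-to-fate mapping above, illustrated (hypothetical successor
# nodes A and B):
#
#   _getobsfate([])                -> 'pruned'
#   _getobsfate([('A',)])          -> 'superseded'
#   _getobsfate([('A', 'B')])      -> 'superseded_split'
#   _getobsfate([('A',), ('B',)])  -> 'diverged'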
782 782 def obsfateverb(successorset, markers):
783 783 """ Return the verb summarizing the successorset and potentially using
784 784 information from the markers
785 785 """
786 786 if not successorset:
787 787 verb = 'pruned'
788 788 elif len(successorset) == 1:
789 789 verb = 'rewritten'
790 790 else:
791 791 verb = 'split'
792 792 return verb
793 793
794 794 def markersdates(markers):
795 795 """returns the list of dates for a list of markers
796 796 """
797 797 return [m[4] for m in markers]
798 798
799 799 def markersusers(markers):
800 800 """ Returns a sorted list of markers users without duplicates
801 801 """
802 802 markersmeta = [dict(m[3]) for m in markers]
803 803 users = set(meta.get('user') for meta in markersmeta if meta.get('user'))
804 804
805 805 return sorted(users)
806 806
807 807 def markersoperations(markers):
808 808 """ Returns a sorted list of markers operations without duplicates
809 809 """
810 810 markersmeta = [dict(m[3]) for m in markers]
811 811 operations = set(meta.get('operation') for meta in markersmeta
812 812 if meta.get('operation'))
813 813
814 814 return sorted(operations)
815 815
816 816 def obsfateprinter(successors, markers, ui):
817 817 """ Build a obsfate string for a single successorset using all obsfate
818 818 related function defined in obsutil
819 819 """
820 820 quiet = ui.quiet
821 821 verbose = ui.verbose
822 822 normal = not verbose and not quiet
823 823
824 824 line = []
825 825
826 826 # Verb
827 827 line.append(obsfateverb(successors, markers))
828 828
829 829 # Operations
830 830 operations = markersoperations(markers)
831 831 if operations:
832 832 line.append(" using %s" % ", ".join(operations))
833 833
834 834 # Successors
835 835 if successors:
836 836 fmtsuccessors = [successors.joinfmt(succ) for succ in successors]
837 837 line.append(" as %s" % ", ".join(fmtsuccessors))
838 838
839 839 # Users
840 840 users = markersusers(markers)
842 842 # Filter out the current user in non-verbose mode to reduce the amount
843 843 # of information
843 843 if not verbose:
844 844 currentuser = ui.username(acceptempty=True)
845 845 if len(users) == 1 and currentuser in users:
846 846 users = None
847 847
848 848 if (verbose or normal) and users:
849 849 line.append(" by %s" % ", ".join(users))
850 850
851 851 # Date
852 852 dates = markersdates(markers)
853 853
854 854 if dates and verbose:
855 855 min_date = min(dates)
856 856 max_date = max(dates)
857 857
858 858 if min_date == max_date:
859 859 fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
860 860 line.append(" (at %s)" % fmtmin_date)
861 861 else:
862 862 fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
863 863 fmtmax_date = util.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
864 864 line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
865 865
866 866 return "".join(line)
867 867
868 def _getfilteredreason(unfilteredrepo, ctx):
868
869 filteredmsgtable = {
870 "pruned": _("hidden revision '%s' is pruned"),
871 "diverged": _("hidden revision '%s' has diverged"),
872 "superseded": _("hidden revision '%s' was rewritten as: %s"),
873 "superseded_split": _("hidden revision '%s' was split as: %s"),
874 "superseded_split_several": _("hidden revision '%s' was split as: %s and "
875 "%d more"),
876 }
877
878 def _getfilteredreason(unfilteredrepo, changeid, ctx):
869 879 """return a human-friendly string on why a obsolete changeset is hidden
870 880 """
871 881 successors = successorssets(unfilteredrepo, ctx.node())
872 882 fate = _getobsfate(successors)
873 883
874 884 # Be more precise in case the revision is superseded
875 885 if fate == 'pruned':
876 reason = _('is pruned')
886 return filteredmsgtable['pruned'] % changeid
877 887 elif fate == 'diverged':
878 reason = _('has diverged')
888 return filteredmsgtable['diverged'] % changeid
879 889 elif fate == 'superseded':
880 reason = _("was rewritten as: %s") % nodemod.short(successors[0][0])
890 single_successor = nodemod.short(successors[0][0])
891 return filteredmsgtable['superseded'] % (changeid, single_successor)
881 892 elif fate == 'superseded_split':
882 893
883 894 succs = []
884 895 for node_id in successors[0]:
885 896 succs.append(nodemod.short(node_id))
886 897
887 898 if len(succs) <= 2:
888 reason = _("was split as: %s") % ", ".join(succs)
899 fmtsuccs = ', '.join(succs)
900 return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
889 901 else:
890 firstsuccessors = ", ".join(succs[:2])
902 firstsuccessors = ', '.join(succs[:2])
891 903 remainingnumber = len(succs) - 2
892 904
893 args = (firstsuccessors, remainingnumber)
894 successorsmsg = _("%s and %d more") % args
895 reason = _("was split as: %s") % successorsmsg
896
897 return reason
905 args = (changeid, firstsuccessors, remainingnumber)
906 return filteredmsgtable['superseded_split_several'] % args
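# Sketch of the intended call site (hypothetical wiring): lookup code on a
# filtered repo can now raise a translatable, changeid-aware message:
#
#   msg = _getfilteredreason(repo.unfiltered(), changeid, ctx)
#   raise error.FilteredRepoLookupError(msg)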